/*
 * Freescale MPC85xx/MPC86xx RapidIO support
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>

#undef DEBUG_PW	/* Port-Write debugging */

/* RapidIO IRQ numbers, read from the OF device tree */
#define IRQ_RIO_BELL(m)	(((struct rio_priv *)(m->priv))->bellirq)
#define IRQ_RIO_TX(m)	(((struct rio_priv *)(m->priv))->txirq)
#define IRQ_RIO_RX(m)	(((struct rio_priv *)(m->priv))->rxirq)
#define IRQ_RIO_PW(m)	(((struct rio_priv *)(m->priv))->pwirq)

#define RIO_ATMU_REGS_OFFSET	0x10c00
#define RIO_P_MSG_REGS_OFFSET	0x11000
#define RIO_S_MSG_REGS_OFFSET	0x13000
#define RIO_ESCSR		0x158
#define RIO_CCSR		0x15c
#define RIO_LTLEDCSR		0x0608
#define RIO_LTLEDCSR_IER	0x80000000
#define RIO_LTLEDCSR_PRT	0x01000000
#define RIO_LTLEECSR		0x060c
#define RIO_EPWISR		0x10010
#define RIO_ISR_AACR		0x10120
#define RIO_ISR_AACR_AA		0x1	/* Accept All ID */
#define RIO_MAINT_WIN_SIZE	0x400000
#define RIO_DBELL_WIN_SIZE	0x1000

#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

#define RIO_IPWMR_SEN		0x00100000
#define RIO_IPWMR_QFIE		0x00000100
#define RIO_IPWMR_EIE		0x00000020
#define RIO_IPWMR_CQ		0x00000002
#define RIO_IPWMR_PWE		0x00000001

#define RIO_IPWSR_QF		0x00100000
#define RIO_IPWSR_TE		0x00000080
#define RIO_IPWSR_QFI		0x00000010
#define RIO_IPWSR_PWD		0x00000008
#define RIO_IPWSR_PWB		0x00000004

#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096
#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001
#define DOORBELL_TID_OFFSET	0x02
#define DOORBELL_SID_OFFSET	0x04
#define DOORBELL_INFO_OFFSET	0x06

#define DOORBELL_MESSAGE_SIZE	0x08
#define DBELL_SID(x)		(*(u16 *)(x + DOORBELL_SID_OFFSET))
#define DBELL_TID(x)		(*(u16 *)(x + DOORBELL_TID_OFFSET))
#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))

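/*
 * Layout of an inbound doorbell message in the doorbell ring, as implied by
 * the offsets above: each entry is DOORBELL_MESSAGE_SIZE (8) bytes, with the
 * 16-bit target ID at offset 0x02, the source ID at 0x04 and the info field
 * at 0x06.  The DBELL_SID/DBELL_TID/DBELL_INF accessors above simply read
 * those fields; fsl_rio_dbell_handler() uses DBELL_INF() to match a received
 * doorbell against a registered info range.
 */
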
struct rio_atmu_regs {
	u32 rowtar;
	u32 rowtear;
	u32 rowbar;
	u32 pad2;
	u32 rowar;
	u32 pad3[3];
};

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
	u32 pad7[226];
	u32 odmr;
	u32 odsr;
	u32 res0[4];
	u32 oddpr;
	u32 oddatr;
	u32 res1[3];
	u32 odretcr;
	u32 res2[12];
	u32 dmr;
	u32 dsr;
	u32 pad8;
	u32 dqdpar;
	u32 pad9;
	u32 dqepar;
	u32 pad10[26];
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};

struct rio_tx_desc {
	u32 res1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 res2;
	u32 res3;
	u32 dwcnt;
	u32 res4;
};

struct rio_dbell_ring {
	void *virt;
	dma_addr_t phys;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct rio_port_write_msg {
	void *virt;
	dma_addr_t phys;
	u32 msg_count;
	u32 err_count;
	u32 discard_count;
};

struct rio_priv {
	struct device *dev;
	void __iomem *regs_win;
	struct rio_atmu_regs __iomem *atmu_regs;
	struct rio_atmu_regs __iomem *maint_atmu_regs;
	struct rio_atmu_regs __iomem *dbell_atmu_regs;
	void __iomem *dbell_win;
	void __iomem *maint_win;
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_dbell_ring dbell_ring;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	struct rio_port_write_msg port_write_msg;
	int bellirq;
	int txirq;
	int rxirq;
	int pwirq;
	struct work_struct pw_work;
	struct kfifo pw_fifo;
	spinlock_t pw_fifo_lock;
};

#define __fsl_read_rio_config(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)\n"		\
		"	eieio\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %1,-1\n"			\
		"	li %0,%3\n"			\
		"	b 2b\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 1b,3b\n"			\
		".text"					\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

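/*
 * The macro above wraps a single guarded load: "op" is one of "lbz"/"lhz"/
 * "lwz", and the .fixup/__ex_table sections arrange for a bus error on that
 * load to be recovered by returning all-ones data and -EFAULT in "err"
 * rather than crashing.  fsl_rio_config_read() below relies on this, e.g.:
 *
 *	u32 rval, err = 0;
 *	__fsl_read_rio_config(rval, data, err, "lwz");
 *	if (err)
 *		... the maintenance read was error-terminated ...
 *
 * fsl_rio_mcheck_exception() is the other half of the mechanism: when a
 * machine check is caused by a RapidIO read error it clears RIO_LTLEDCSR and
 * redirects the NIP to the fixup entry recorded for the faulting load.
 */
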
static void __iomem *rio_regs_win;

static int (*saved_mcheck_exception)(struct pt_regs *regs);

static int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *entry = NULL;
	unsigned long reason = (mfspr(SPRN_MCSR) & MCSR_MASK);

	if (reason & MCSR_BUS_RBERR) {
		reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
		if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
			/* Check if we are prepared to handle this fault */
			entry = search_exception_tables(regs->nip);
			if (entry) {
				pr_debug("RIO: %s - MC Exception handled\n",
					 __func__);
				out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
					 0);
				regs->msr |= MSR_RI;
				regs->nip = entry->fixup;
				return 1;
			}
		}
	}

	if (saved_mcheck_exception)
		return saved_mcheck_exception(regs);
	else
		return cur_cpu_spec->machine_check(regs);
}

/**
 * fsl_rio_doorbell_send - Send an MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends an MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
static int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);
	switch (mport->phy_type) {
	case RIO_PHY_PARALLEL:
		out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
		out_be16(priv->dbell_win, data);
		break;
	case RIO_PHY_SERIAL:
		/* On serial RapidIO silicon, such as the MPC8548 and
		 * MPC8641, the operations below are required.
		 */
		out_be32(&priv->msg_regs->odmr, 0x00000000);
		out_be32(&priv->msg_regs->odretcr, 0x00000004);
		out_be32(&priv->msg_regs->oddpr, destid << 16);
		out_be32(&priv->msg_regs->oddatr, data);
		out_be32(&priv->msg_regs->odmr, 0x00000001);
		break;
	}

	return 0;
}

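/*
 * Illustrative sketch (not part of this driver): clients do not call
 * fsl_rio_doorbell_send() directly; they go through the generic RapidIO
 * core, which reaches this routine via ops->dsend.  Assuming the generic
 * rio_drv.h API of this kernel generation, sending the 16-bit info value
 * 0x1234 to a discovered device looks roughly like:
 *
 *	rio_send_doorbell(rdev, 0x1234);
 *
 * which ends up here with @destid taken from the target rio_dev.
 */
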
/**
 * fsl_local_config_read - Generate an MPC85xx local config space read
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be read into
 *
 * Generates an MPC85xx local configuration space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_read(struct rio_mport *mport,
				int index, u32 offset, int len, u32 *data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
		 offset);
	*data = in_be32(priv->regs_win + offset);

	return 0;
}

/**
 * fsl_local_config_write - Generate an MPC85xx local config space write
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates an MPC85xx local configuration space write. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_write(struct rio_mport *mport,
				int index, u32 offset, int len, u32 data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug
		("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
		index, offset, data);
	out_be32(priv->regs_win + offset, data);

	return 0;
}

/**
 * fsl_rio_config_read - Generate an MPC85xx read maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Location to be read into
 *
 * Generates an MPC85xx read maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 *val)
{
	struct rio_priv *priv = mport->priv;
	u8 *data;
	u32 rval, err = 0;

	pr_debug
		("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
		index, destid, hopcount, offset, len);
	out_be32(&priv->maint_atmu_regs->rowtar,
		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));

	data = (u8 *) priv->maint_win + offset;
	switch (len) {
	case 1:
		__fsl_read_rio_config(rval, data, err, "lbz");
		break;
	case 2:
		__fsl_read_rio_config(rval, data, err, "lhz");
		break;
	default:
		__fsl_read_rio_config(rval, data, err, "lwz");
		break;
	}

	if (err) {
		pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
			 err, destid, hopcount, offset);
	}

	*val = rval;

	return err;
}

/**
 * fsl_rio_config_write - Generate an MPC85xx write maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Value to be written
 *
 * Generates an MPC85xx write maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 val)
{
	struct rio_priv *priv = mport->priv;
	u8 *data;
	pr_debug
		("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
		index, destid, hopcount, offset, len, val);
	out_be32(&priv->maint_atmu_regs->rowtar,
		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));

	data = (u8 *) priv->maint_win + offset;
	switch (len) {
	case 1:
		out_8((u8 *) data, val);
		break;
	case 2:
		out_be16((u16 *) data, val);
		break;
	default:
		out_be32((u32 *) data, val);
		break;
	}

	return 0;
}

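/*
 * Illustrative sketch (not part of this driver): the two maintenance
 * accessors above are plugged into ops->cread/ops->cwrite in fsl_rio_setup()
 * and are normally reached through the generic configuration helpers, e.g.
 * (assuming the generic rio_drv.h API of this era):
 *
 *	u32 result;
 *	rio_read_config_32(rdev, RIO_DEV_ID_CAR, &result);
 *
 * which becomes a maintenance read through priv->maint_win, with the target
 * destid/hopcount programmed into priv->maint_atmu_regs->rowtar as above.
 */
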
/**
 * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct rio_priv *priv = mport->priv;
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
					+ priv->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug
	    ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
	     rdev->destid, mbox, (int)buffer, len);

	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	switch (mport->phy_type) {
	case RIO_PHY_PARALLEL:
		/* Set mbox field for message */
		desc->dport = mbox & 0x3;

		/* Enable EOMI interrupt, set priority, and set destid */
		desc->dattr = 0x28000000 | (rdev->destid << 2);
		break;
	case RIO_PHY_SERIAL:
		/* Set mbox field for message, and set destid */
		desc->dport = (rdev->destid << 16) | (mbox & 0x3);

		/* Enable EOMI interrupt and priority */
		desc->dattr = 0x28000000;
		break;
	}

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&priv->msg_regs->omr);
	out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
		priv->msg_tx_ring.tx_slot = 0;

      out:
	return ret;
}

EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);

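/*
 * Illustrative sketch (not part of this driver): rio_hw_add_outb_message()
 * above and rio_open_outb_mbox() below are the mport back-ends of the
 * generic outbound mailbox API.  Assuming the generic rio_drv.h interface of
 * this era, a client would typically do something like:
 *
 *	rio_request_outb_mbox(mport, dev_id, 0, 64, my_tx_done_cb);
 *	rio_add_outb_message(mport, rdev, 0, buffer, len);
 *
 * with the hypothetical my_tx_done_cb() invoked from fsl_rio_tx_handler()
 * (via port->outb_msg[0].mcback) as descriptors complete.
 */
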
/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	osr = in_be32(&priv->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&priv->msg_regs->odqdpar);
		int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
		port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
				slot);

		/* Ack the end-of-message interrupt */
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

      out:
	return IRQ_HANDLED;
}

/**
 * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	priv->msg_tx_ring.dev_id = dev_id;
	priv->msg_tx_ring.size = entries;

	for (i = 0; i < priv->msg_tx_ring.size; i++) {
		priv->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!priv->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < priv->msg_tx_ring.size; j++)
				if (priv->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
							RIO_MSG_BUFFER_SIZE,
							priv->msg_tx_ring.
							virt_buffer[j],
							priv->msg_tx_ring.
							phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&priv->msg_tx_ring.phys, GFP_KERNEL);
	if (!priv->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(priv->msg_tx_ring.virt, 0,
		priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	priv->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
	out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&priv->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&priv->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&priv->msg_regs->omr,
		 in_be32(&priv->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);

      out:
	return rc;

      out_irq:
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

      out_dma:
	for (i = 0; i < priv->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				  priv->msg_tx_ring.virt_buffer[i],
				  priv->msg_tx_ring.phys_buffer[i]);

	return rc;
}

/**
 * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	/* Disable outbound message unit */
	out_be32(&priv->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	isr = in_be32(&priv->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * We implement *only* mailbox 0, but can receive messages
		 * for any mailbox/letter to that mailbox destination. So,
		 * make the callback with an unknown/invalid mailbox number
		 * argument.
		 */
		port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);

		/* Ack the queueing interrupt */
		out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

      out:
	return IRQ_HANDLED;
}

/**
 * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	priv->msg_rx_ring.dev_id = dev_id;
	priv->msg_rx_ring.size = entries;
	priv->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < priv->msg_rx_ring.size; i++)
		priv->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&priv->msg_rx_ring.phys, GFP_KERNEL);
	if (!priv->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
	out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		/* Free the inbound ring allocated above */
		dma_free_coherent(priv->dev,
				  priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				  priv->msg_rx_ring.virt,
				  priv->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&priv->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&priv->msg_regs->imr, 0x1);

      out:
	return rc;
}

/**
 * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	/* Disable inbound message unit */
	out_be32(&priv->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct rio_priv *priv = mport->priv;

	pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 priv->msg_rx_ring.rx_slot);

	if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			priv->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
	if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
		priv->msg_rx_ring.rx_slot = 0;

      out:
	return rc;
}

EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);

/**
 * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	u32 phys_buf, virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&priv->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
		goto out2;

	virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
					- priv->msg_rx_ring.phys);
	buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = priv->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;

      out1:
	setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);

      out2:
	return buf;
}

EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);

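/*
 * Illustrative sketch (not part of this driver): the inbound mailbox
 * routines above mirror the outbound side.  Assuming the generic rio_drv.h
 * interface of this era, a client receive path looks roughly like:
 *
 *	rio_request_inb_mbox(mport, dev_id, 0, 32, my_rx_cb);
 *	...
 *	void *msg = rio_get_inb_message(mport, 0);	// from my_rx_cb()
 *
 * where the hypothetical my_rx_cb() is invoked from fsl_rio_rx_handler().
 * Buffers are returned to the ring with rio_add_inb_buffer(), which maps to
 * rio_hw_add_inb_buffer() above.
 */
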
/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	dsr = in_be32(&priv->msg_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		u32 dmsg =
			(u32) priv->dbell_ring.virt +
			(in_be32(&priv->msg_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));

		list_for_each_entry(dbell, &port->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
			    (dbell->res->end >= DBELL_INF(dmsg))) {
				found = 1;
				break;
			}
		}
		if (found) {
			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg),
				    DBELL_TID(dmsg), DBELL_INF(dmsg));
		} else {
			pr_debug
			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
			     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
		}
		setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
	}

      out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @mport: Master port implementing the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
static int fsl_rio_doorbell_init(struct rio_mport *mport)
{
	struct rio_priv *priv = mport->priv;
	int rc = 0;

	/* Map outbound doorbell window immediately after maintenance window */
	priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
			    RIO_DBELL_WIN_SIZE);
	if (!priv->dbell_win) {
		printk(KERN_ERR
		       "RIO: unable to map outbound doorbell window\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Initialize inbound doorbells */
	priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 *
		    DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
	if (!priv->dbell_ring.virt) {
		printk(KERN_ERR
		       "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		iounmap(priv->dbell_win);
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
	out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)mport);
	if (rc < 0) {
		iounmap(priv->dbell_win);
		dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE,
				  priv->dbell_ring.virt, priv->dbell_ring.phys);
		printk(KERN_ERR
		       "MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&priv->msg_regs->dmr, 0x00108161);

      out:
	return rc;
}

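/*
 * Illustrative sketch (not part of this driver): inbound doorbells are
 * consumed through the generic doorbell API.  Assuming the rio_drv.h
 * interface of this era, a client registers an info-value range and a
 * callback, e.g.:
 *
 *	rio_request_inb_dbell(mport, dev_id, 0x100, 0x1ff, my_dbell_cb);
 *
 * fsl_rio_dbell_handler() above walks port->dbells and invokes the callback
 * whose resource range contains the received info field; my_dbell_cb is a
 * hypothetical name.
 */
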
/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port-write interrupts. Queues incoming port-write messages
 * for deferred processing and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;
	u32 epwisr, tmp;

	ipwmr = in_be32(&priv->msg_regs->pwmr);
	ipwsr = in_be32(&priv->msg_regs->pwsr);

	epwisr = in_be32(priv->regs_win + RIO_EPWISR);
	if (epwisr & 0x80000000) {
		tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
		pr_info("RIO_LTLEDCSR = 0x%x\n", tmp);
		out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
	}

	if (!(epwisr & 0x00000001))
		return IRQ_HANDLED;

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif
	out_be32(&priv->msg_regs->pwsr,
		 ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		priv->port_write_msg.err_count++;
		pr_info("RIO: Port-Write Transaction Err (%d)\n",
			 priv->port_write_msg.err_count);
	}
	if (ipwsr & RIO_IPWSR_PWD) {
		priv->port_write_msg.discard_count++;
		pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 priv->port_write_msg.discard_count);
	}

	/* Schedule deferred processing if a port-write was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save the port-write message if there is room in the FIFO,
		 * otherwise discard it.
		 */
		if (kfifo_avail(&priv->pw_fifo) >= RIO_PW_MSG_SIZE) {
			priv->port_write_msg.msg_count++;
			kfifo_in(&priv->pw_fifo, priv->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			priv->port_write_msg.discard_count++;
			pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 priv->port_write_msg.discard_count);
		}
		schedule_work(&priv->pw_work);
	}

	/* Issue the Clear Queue command. This allows another
	 * port-write to be received.
	 */
	out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

	return IRQ_HANDLED;
}

static void fsl_pw_dpc(struct work_struct *work)
{
	struct rio_priv *priv = container_of(work, struct rio_priv, pw_work);
	unsigned long flags;
	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)];

	/*
	 * Process port-write messages
	 */
	spin_lock_irqsave(&priv->pw_fifo_lock, flags);
	while (kfifo_out(&priv->pw_fifo, (unsigned char *)msg_buffer,
			 RIO_PW_MSG_SIZE)) {
		/* Process one message */
		spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
#ifdef DEBUG_PW
		{
		u32 i;
		pr_debug("%s : Port-Write Message:", __func__);
		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
			if ((i%4) == 0)
				pr_debug("\n0x%02x: 0x%08x", i*4,
					 msg_buffer[i]);
			else
				pr_debug(" 0x%08x", msg_buffer[i]);
		}
		pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
		spin_lock_irqsave(&priv->pw_fifo_lock, flags);
	}
	spin_unlock_irqrestore(&priv->pw_fifo_lock, flags);
}

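/*
 * Port-write flow, as implemented above: fsl_rio_port_write_handler() runs
 * in interrupt context, copies the hardware port-write queue entry into
 * priv->pw_fifo and schedules pw_work; fsl_pw_dpc() then drains the FIFO in
 * process context and hands each message to the RapidIO core via
 * rio_inb_pwrite_handler() for further processing.
 */
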
/**
 * fsl_rio_pw_enable - enable/disable the port-write interface
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
static int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
	struct rio_priv *priv = mport->priv;
	u32 rval;

	rval = in_be32(&priv->msg_regs->pwmr);

	if (enable)
		rval |= RIO_IPWMR_PWE;
	else
		rval &= ~RIO_IPWMR_PWE;

	out_be32(&priv->msg_regs->pwmr, rval);

	return 0;
}

/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @mport: Master port implementing the port write unit
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
static int fsl_rio_port_write_init(struct rio_mport *mport)
{
	struct rio_priv *priv = mport->priv;
	int rc = 0;

	/* Following configurations require a disabled port write controller */
	out_be32(&priv->msg_regs->pwmr,
		 in_be32(&priv->msg_regs->pwmr) & ~RIO_IPWMR_PWE);

	/* Initialize port write */
	priv->port_write_msg.virt = dma_alloc_coherent(priv->dev,
					RIO_PW_MSG_SIZE,
					&priv->port_write_msg.phys, GFP_KERNEL);
	if (!priv->port_write_msg.virt) {
		pr_err("RIO: unable to allocate port write queue\n");
		return -ENOMEM;
	}

	priv->port_write_msg.err_count = 0;
	priv->port_write_msg.discard_count = 0;

	/* Point dequeue/enqueue pointers at first entry */
	out_be32(&priv->msg_regs->epwqbar, 0);
	out_be32(&priv->msg_regs->pwqbar, (u32) priv->port_write_msg.phys);

	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
		 in_be32(&priv->msg_regs->epwqbar),
		 in_be32(&priv->msg_regs->pwqbar));

	/* Clear interrupt status IPWSR */
	out_be32(&priv->msg_regs->pwsr,
		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	/* Configure the port write controller for snooping, enable all
	   reporting, and clear the queue */
	out_be32(&priv->msg_regs->pwmr,
		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);


	/* Hook up port-write handler */
	rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
			 "port-write", (void *)mport);
	if (rc < 0) {
		pr_err("MPC85xx RIO: unable to request port-write irq");
		goto err_out;
	}

	INIT_WORK(&priv->pw_work, fsl_pw_dpc);
	spin_lock_init(&priv->pw_fifo_lock);
	if (kfifo_alloc(&priv->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		pr_err("FIFO allocation failed\n");
		rc = -ENOMEM;
		goto err_out_irq;
	}

	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
		 in_be32(&priv->msg_regs->pwmr),
		 in_be32(&priv->msg_regs->pwsr));

	return rc;

err_out_irq:
	free_irq(IRQ_RIO_PW(mport), (void *)mport);
err_out:
	dma_free_coherent(priv->dev, RIO_PW_MSG_SIZE,
			  priv->port_write_msg.virt,
			  priv->port_write_msg.phys);
	return rc;
}

static char *cmdline = NULL;

static int fsl_rio_get_hdid(int index)
{
	/* XXX Need to parse multiple entries in some format */
	if (!cmdline)
		return -1;

	return simple_strtol(cmdline, NULL, 0);
}

static int fsl_rio_get_cmdline(char *s)
{
	if (!s)
		return 0;

	cmdline = s;
	return 1;
}

__setup("riohdid=", fsl_rio_get_cmdline);

static inline void fsl_rio_info(struct device *dev, u32 ccsr)
{
	const char *str;
	if (ccsr & 1) {
		/* Serial phy */
		switch (ccsr >> 30) {
		case 0:
			str = "1";
			break;
		case 1:
			str = "4";
			break;
		default:
			str = "Unknown";
			break;
		}
		dev_info(dev, "Hardware port width: %s\n", str);

		switch ((ccsr >> 27) & 7) {
		case 0:
			str = "Single-lane 0";
			break;
		case 1:
			str = "Single-lane 2";
			break;
		case 2:
			str = "Four-lane";
			break;
		default:
			str = "Unknown";
			break;
		}
		dev_info(dev, "Training connection status: %s\n", str);
	} else {
		/* Parallel phy */
		if (!(ccsr & 0x80000000))
			dev_info(dev, "Output port operating in 8-bit mode\n");
		if (!(ccsr & 0x08000000))
			dev_info(dev, "Input port operating in 8-bit mode\n");
	}
}

Matt Porter2b0c28d7f2005-11-07 01:00:19 -08001311/**
Randy Dunlap9941d942008-04-30 16:45:58 -07001312 * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface
1313 * @dev: of_device pointer
Matt Porter2b0c28d7f2005-11-07 01:00:19 -08001314 *
1315 * Initializes MPC85xx RapidIO hardware interface, configures
1316 * master port with system-specific info, and registers the
1317 * master port with the RapidIO subsystem.
1318 */
Zhang Weicc2bb692008-04-18 13:33:41 -07001319int fsl_rio_setup(struct of_device *dev)
Matt Porter2b0c28d7f2005-11-07 01:00:19 -08001320{
1321 struct rio_ops *ops;
1322 struct rio_mport *port;
Zhang Weicc2bb692008-04-18 13:33:41 -07001323 struct rio_priv *priv;
1324 int rc = 0;
1325 const u32 *dt_range, *cell;
1326 struct resource regs;
1327 int rlen;
Zhang Wei61b26912008-04-18 13:33:44 -07001328 u32 ccsr;
Zhang Weicc2bb692008-04-18 13:33:41 -07001329 u64 law_start, law_size;
1330 int paw, aw, sw;
1331
Grant Likely61c7a082010-04-13 16:12:29 -07001332 if (!dev->dev.of_node) {
Zhang Weicc2bb692008-04-18 13:33:41 -07001333 dev_err(&dev->dev, "Device OF-Node is NULL");
1334 return -EFAULT;
1335 }
1336
Grant Likely61c7a082010-04-13 16:12:29 -07001337 rc = of_address_to_resource(dev->dev.of_node, 0, &regs);
Zhang Weicc2bb692008-04-18 13:33:41 -07001338 if (rc) {
1339 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
Grant Likely61c7a082010-04-13 16:12:29 -07001340 dev->dev.of_node->full_name);
Zhang Weicc2bb692008-04-18 13:33:41 -07001341 return -EFAULT;
1342 }
Grant Likely61c7a082010-04-13 16:12:29 -07001343 dev_info(&dev->dev, "Of-device full name %s\n", dev->dev.of_node->full_name);
Kumar Galafc274a12009-05-13 17:02:24 -05001344 dev_info(&dev->dev, "Regs: %pR\n", &regs);
Zhang Weicc2bb692008-04-18 13:33:41 -07001345
Grant Likely61c7a082010-04-13 16:12:29 -07001346 dt_range = of_get_property(dev->dev.of_node, "ranges", &rlen);
Zhang Weicc2bb692008-04-18 13:33:41 -07001347 if (!dt_range) {
1348 dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
Grant Likely61c7a082010-04-13 16:12:29 -07001349 dev->dev.of_node->full_name);
Zhang Weicc2bb692008-04-18 13:33:41 -07001350 return -EFAULT;
1351 }
1352
1353 /* Get node address wide */
Grant Likely61c7a082010-04-13 16:12:29 -07001354 cell = of_get_property(dev->dev.of_node, "#address-cells", NULL);
Zhang Weicc2bb692008-04-18 13:33:41 -07001355 if (cell)
1356 aw = *cell;
1357 else
Grant Likely61c7a082010-04-13 16:12:29 -07001358 aw = of_n_addr_cells(dev->dev.of_node);
Zhang Weicc2bb692008-04-18 13:33:41 -07001359 /* Get node size wide */
Grant Likely61c7a082010-04-13 16:12:29 -07001360 cell = of_get_property(dev->dev.of_node, "#size-cells", NULL);
Zhang Weicc2bb692008-04-18 13:33:41 -07001361 if (cell)
1362 sw = *cell;
1363 else
Grant Likely61c7a082010-04-13 16:12:29 -07001364 sw = of_n_size_cells(dev->dev.of_node);
Zhang Weicc2bb692008-04-18 13:33:41 -07001365 /* Get parent address wide wide */
Grant Likely61c7a082010-04-13 16:12:29 -07001366 paw = of_n_addr_cells(dev->dev.of_node);

	law_start = of_read_number(dt_range + aw, paw);
	law_size = of_read_number(dt_range + aw + paw, sw);

	dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
			law_start, law_size);

	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
	if (!ops) {
		rc = -ENOMEM;
		goto err_ops;
	}
	ops->lcread = fsl_local_config_read;
	ops->lcwrite = fsl_local_config_write;
	ops->cread = fsl_rio_config_read;
	ops->cwrite = fsl_rio_config_write;
	ops->dsend = fsl_rio_doorbell_send;
	ops->pwenable = fsl_rio_pw_enable;

	port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
	if (!port) {
		rc = -ENOMEM;
		goto err_port;
	}
	port->id = 0;
	port->index = 0;

	priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
	if (!priv) {
		printk(KERN_ERR "Can't alloc memory for 'priv'\n");
		rc = -ENOMEM;
		goto err_priv;
	}

	INIT_LIST_HEAD(&port->dbells);
	port->iores.start = law_start;
	port->iores.end = law_start + law_size - 1;
	port->iores.flags = IORESOURCE_MEM;
	port->iores.name = "rio_io_win";

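	/*
	 * Interrupts are taken from the node's "interrupts" list by
	 * index: 0 for port-write, 2 for doorbell, 3 for the outbound
	 * (TX) message unit and 4 for the inbound (RX) message unit.
	 */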
	priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0);
	priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2);
	priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3);
	priv->rxirq = irq_of_parse_and_map(dev->dev.of_node, 4);
	dev_info(&dev->dev, "pwirq: %d, bellirq: %d, txirq: %d, rxirq: %d\n",
		 priv->pwirq, priv->bellirq, priv->txirq, priv->rxirq);

	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
	strcpy(port->name, "RIO0 mport");

	priv->dev = &dev->dev;

	port->ops = ops;
	port->host_deviceid = fsl_rio_get_hdid(port->id);

	port->priv = priv;
	rio_register_mport(port);

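	/*
	 * Map the controller's register block.  A copy of the pointer is
	 * kept in the file-scope rio_regs_win so that exception-level
	 * code (the machine check handler installed below) can reach the
	 * registers without going through the mport.
	 */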
	priv->regs_win = ioremap(regs.start, resource_size(&regs));
	rio_regs_win = priv->regs_win;

	/* Probe the master port phy type */
	ccsr = in_be32(priv->regs_win + RIO_CCSR);
	port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
	dev_info(&dev->dev, "RapidIO PHY type: %s\n",
			(port->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
			((port->phy_type == RIO_PHY_SERIAL) ? "serial" :
			 "unknown"));
	/* Check the port training status */
	if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
		dev_err(&dev->dev, "Port is not ready. "
			"Attempting to restart the connection...\n");
		switch (port->phy_type) {
		case RIO_PHY_SERIAL:
			/* Disable ports */
			out_be32(priv->regs_win + RIO_CCSR, 0);
			/* Set 1x lane */
			setbits32(priv->regs_win + RIO_CCSR, 0x02000000);
			/* Enable ports */
			setbits32(priv->regs_win + RIO_CCSR, 0x00600000);
			break;
		case RIO_PHY_PARALLEL:
			/* Disable ports */
			out_be32(priv->regs_win + RIO_CCSR, 0x22000000);
			/* Enable ports */
			out_be32(priv->regs_win + RIO_CCSR, 0x44000000);
			break;
		}
		msleep(100);
		if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
			dev_err(&dev->dev, "Port restart failed.\n");
			rc = -ENOLINK;
			goto err;
		}
		dev_info(&dev->dev, "Port restart successful.\n");
	}
	fsl_rio_info(&dev->dev, ccsr);

	port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
					& RIO_PEF_CTLS) >> 4;
	dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
			port->sys_size ? 65536 : 256);

	priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
					+ RIO_ATMU_REGS_OFFSET);
	priv->maint_atmu_regs = priv->atmu_regs + 1;
	priv->dbell_atmu_regs = priv->atmu_regs + 2;
	priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win +
			((port->phy_type == RIO_PHY_SERIAL) ?
			RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET));

	/* Set to receive any dist ID for serial RapidIO controller. */
	if (port->phy_type == RIO_PHY_SERIAL)
		out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);

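	/*
	 * Carve the outbound LAW up through the ATMU: the first 4 MB
	 * (RIO_MAINT_WIN_SIZE) becomes the maintenance transaction
	 * window and the 4 KB immediately after it becomes the outbound
	 * doorbell window.
	 */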
	/* Configure maintenance transaction window */
	out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
	out_be32(&priv->maint_atmu_regs->rowar, 0x80077015);	/* 4M */

	priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);

	/* Configure outbound doorbell window */
	out_be32(&priv->dbell_atmu_regs->rowbar,
			(law_start + RIO_MAINT_WIN_SIZE) >> 12);
	out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b);	/* 4k */
	fsl_rio_doorbell_init(port);
	fsl_rio_port_write_init(port);

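	/*
	 * Hook the machine check handler so that error responses to
	 * RapidIO maintenance accesses can be caught and recovered from
	 * rather than taking the system down; HID1[RFXE] must be set for
	 * such errors to be reported to the core as machine checks.
	 */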
	saved_mcheck_exception = ppc_md.machine_check_exception;
	ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
	/* Ensure that RFXE is set */
	mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));

	return 0;
err:
	iounmap(priv->regs_win);
	kfree(priv);
err_priv:
	kfree(port);
err_port:
	kfree(ops);
err_ops:
	return rc;
}

/* The probe function for the RapidIO peer-to-peer network. */
static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev,
					  const struct of_device_id *match)
{
	int rc;

	printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
	       dev->dev.of_node->full_name);

	rc = fsl_rio_setup(dev);
	if (rc)
		goto out;

	/* Enumerate all registered ports */
	rc = rio_init_mports();
out:
	return rc;
}

static const struct of_device_id fsl_of_rio_rpn_ids[] = {
	{
		.compatible = "fsl,rapidio-delta",
	},
	{},
};
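
/*
 * For reference, a sketch of the sort of device tree node this driver
 * matches.  The register offset, window ranges and interrupt specifiers
 * below are purely illustrative -- the real values come from the
 * board's dts and will differ:
 *
 *	rapidio@c0000 {
 *		compatible = "fsl,rapidio-delta";
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		reg = <0xc0000 0x20000>;
 *		ranges = <0 0 0xc0000000 0 0x20000000>;
 *		interrupt-parent = <&mpic>;
 *		interrupts = <48 2 49 2 50 2 53 2 54 2>;
 *	};
 */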

static struct of_platform_driver fsl_of_rio_rpn_driver = {
	.driver = {
		.name = "fsl-of-rio",
		.owner = THIS_MODULE,
		.of_match_table = fsl_of_rio_rpn_ids,
	},
	.probe = fsl_of_rio_rpn_probe,
};

static __init int fsl_of_rio_rpn_init(void)
{
	return of_register_platform_driver(&fsl_of_rio_rpn_driver);
}

subsys_initcall(fsl_of_rio_rpn_init);