/*
 * Freescale MPC85xx/MPC86xx RapidIO support
 *
 * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <asm/io.h>

/* RapidIO IRQ numbers, read from the OF device tree */
#define IRQ_RIO_BELL(m)		(((struct rio_priv *)(m->priv))->bellirq)
#define IRQ_RIO_TX(m)		(((struct rio_priv *)(m->priv))->txirq)
#define IRQ_RIO_RX(m)		(((struct rio_priv *)(m->priv))->rxirq)

#define RIO_ATMU_REGS_OFFSET	0x10c00
#define RIO_P_MSG_REGS_OFFSET	0x11000
#define RIO_S_MSG_REGS_OFFSET	0x13000
#define RIO_ESCSR		0x158
#define RIO_CCSR		0x15c
#define RIO_ISR_AACR		0x10120
#define RIO_ISR_AACR_AA		0x1	/* Accept All ID */
#define RIO_MAINT_WIN_SIZE	0x400000
#define RIO_DBELL_WIN_SIZE	0x1000

#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096
#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001
#define DOORBELL_TID_OFFSET	0x02
#define DOORBELL_SID_OFFSET	0x04
#define DOORBELL_INFO_OFFSET	0x06

#define DOORBELL_MESSAGE_SIZE	0x08
#define DBELL_SID(x)		(*(u16 *)(x + DOORBELL_SID_OFFSET))
#define DBELL_TID(x)		(*(u16 *)(x + DOORBELL_TID_OFFSET))
#define DBELL_INF(x)		(*(u16 *)(x + DOORBELL_INFO_OFFSET))

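/*
 * Each inbound doorbell occupies one DOORBELL_MESSAGE_SIZE (8-byte) queue
 * entry; the DBELL_SID/DBELL_TID/DBELL_INF accessors pull the 16-bit source
 * ID, target ID and info field out of that entry at the offsets defined
 * above.
 */
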
struct rio_atmu_regs {
	u32 rowtar;
	u32 rowtear;
	u32 rowbar;
	u32 pad2;
	u32 rowar;
	u32 pad3[3];
};

struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
	u32 pad7[226];
	u32 odmr;
	u32 odsr;
	u32 res0[4];
	u32 oddpr;
	u32 oddatr;
	u32 res1[3];
	u32 odretcr;
	u32 res2[12];
	u32 dmr;
	u32 dsr;
	u32 pad8;
	u32 dqdpar;
	u32 pad9;
	u32 dqepar;
	u32 pad10[26];
	u32 pwmr;
	u32 pwsr;
	u32 pad11;
	u32 pwqbar;
};

struct rio_tx_desc {
	u32 res1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 res2;
	u32 res3;
	u32 dwcnt;
	u32 res4;
};

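/*
 * Each outbound descriptor is RIO_MSG_DESC_SIZE (32) bytes: a snoopable
 * source buffer address (saddr), destination port/mailbox and attributes
 * (dport/dattr), and the doubleword transfer count (dwcnt).  The tx
 * interrupt handler relies on this size when it converts a dequeue pointer
 * back into a ring slot (">> 5").
 */
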
struct rio_dbell_ring {
	void *virt;
	dma_addr_t phys;
};

struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct rio_priv {
	struct device *dev;
	void __iomem *regs_win;
	struct rio_atmu_regs __iomem *atmu_regs;
	struct rio_atmu_regs __iomem *maint_atmu_regs;
	struct rio_atmu_regs __iomem *dbell_atmu_regs;
	void __iomem *dbell_win;
	void __iomem *maint_win;
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_dbell_ring dbell_ring;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int bellirq;
	int txirq;
	int rxirq;
};

/**
 * fsl_rio_doorbell_send - Send an MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends an MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
static int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);
	switch (mport->phy_type) {
	case RIO_PHY_PARALLEL:
		out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
		out_be16(priv->dbell_win, data);
		break;
	case RIO_PHY_SERIAL:
		/* On serial RapidIO silicon such as the MPC8548 and MPC8641,
		 * the doorbell must be sent through the outbound doorbell
		 * message unit using the sequence below.
		 */
		out_be32(&priv->msg_regs->odmr, 0x00000000);
		out_be32(&priv->msg_regs->odretcr, 0x00000004);
		out_be32(&priv->msg_regs->oddpr, destid << 16);
		out_be32(&priv->msg_regs->oddatr, data);
		out_be32(&priv->msg_regs->odmr, 0x00000001);
		break;
	}

	return 0;
}

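/*
 * Note: drivers do not normally call this directly; it is installed as the
 * mport's ->dsend() op in fsl_rio_setup() and is reached through the generic
 * RapidIO doorbell API (e.g. rio_send_doorbell()).
 */
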
/**
 * fsl_local_config_read - Generate an MPC85xx local config space read
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be read into
 *
 * Generates an MPC85xx local configuration space read. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_read(struct rio_mport *mport,
				int index, u32 offset, int len, u32 *data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
		 offset);
	*data = in_be32(priv->regs_win + offset);

	return 0;
}

/**
 * fsl_local_config_write - Generate an MPC85xx local config space write
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Value to be written
 *
 * Generates an MPC85xx local configuration space write. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int fsl_local_config_write(struct rio_mport *mport,
				int index, u32 offset, int len, u32 data)
{
	struct rio_priv *priv = mport->priv;
	pr_debug
	    ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
	     index, offset, data);
	out_be32(priv->regs_win + offset, data);

	return 0;
}

/**
 * fsl_rio_config_read - Generate an MPC85xx read maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Location to be read into
 *
 * Generates an MPC85xx read maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 *val)
{
	struct rio_priv *priv = mport->priv;
	u8 *data;

	pr_debug
	    ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
	     index, destid, hopcount, offset, len);
	out_be32(&priv->maint_atmu_regs->rowtar,
		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));

	data = (u8 *) priv->maint_win + offset;
	switch (len) {
	case 1:
		*val = in_8((u8 *) data);
		break;
	case 2:
		*val = in_be16((u16 *) data);
		break;
	default:
		*val = in_be32((u32 *) data);
		break;
	}

	return 0;
}

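/*
 * The maintenance read above and the maintenance write below share the same
 * ATMU outbound window: the destination ID, hop count and high-order bits of
 * the config-space offset are folded into rowtar, and the access itself is
 * an ordinary load/store through the ioremapped 4MB maintenance window
 * (maint_win).
 */
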
/**
 * fsl_rio_config_write - Generate an MPC85xx write maintenance transaction
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @val: Value to be written
 *
 * Generates an MPC85xx write maintenance transaction. Returns %0 on
 * success or %-EINVAL on failure.
 */
static int
fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
			u8 hopcount, u32 offset, int len, u32 val)
{
	struct rio_priv *priv = mport->priv;
	u8 *data;
	pr_debug
	    ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
	     index, destid, hopcount, offset, len, val);
	out_be32(&priv->maint_atmu_regs->rowtar,
		 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));

	data = (u8 *) priv->maint_win + offset;
	switch (len) {
	case 1:
		out_8((u8 *) data, val);
		break;
	case 2:
		out_be16((u16 *) data, val);
		break;
	default:
		out_be32((u32 *) data, val);
		break;
	}

	return 0;
}

/**
 * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct rio_priv *priv = mport->priv;
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
					+ priv->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug
	    ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
	     rdev->destid, mbox, (int)buffer, len);

	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
	       len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
		       + len, 0, RIO_MAX_MSG_SIZE - len);

	switch (mport->phy_type) {
	case RIO_PHY_PARALLEL:
		/* Set mbox field for message */
		desc->dport = mbox & 0x3;

		/* Enable EOMI interrupt, set priority, and set destid */
		desc->dattr = 0x28000000 | (rdev->destid << 2);
		break;
	case RIO_PHY_SERIAL:
		/* Set mbox field for message, and set destid */
		desc->dport = (rdev->destid << 16) | (mbox & 0x3);

		/* Enable EOMI interrupt and priority */
		desc->dattr = 0x28000000;
		break;
	}

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&priv->msg_regs->omr);
	out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
		priv->msg_tx_ring.tx_slot = 0;

 out:
	return ret;
}

EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);

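/*
 * Note: this helper implements the mport outbound message path; client
 * drivers normally reach it through the generic rio_add_outb_message()
 * wrapper rather than calling the exported symbol directly.
 */
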
/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	osr = in_be32(&priv->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&priv->msg_regs->odqdpar);
		int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
		port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
					 slot);

		/* Ack the end-of-message interrupt */
		out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

 out:
	return IRQ_HANDLED;
}

/**
 * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes the buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	priv->msg_tx_ring.dev_id = dev_id;
	priv->msg_tx_ring.size = entries;

	for (i = 0; i < priv->msg_tx_ring.size; i++) {
		priv->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!priv->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < priv->msg_tx_ring.size; j++)
				if (priv->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						priv->msg_tx_ring.virt_buffer[j],
						priv->msg_tx_ring.phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	priv->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&priv->msg_tx_ring.phys, GFP_KERNEL);
	if (!priv->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(priv->msg_tx_ring.virt, 0,
	       priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	priv->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
	out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&priv->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&priv->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&priv->msg_regs->omr,
		 in_be32(&priv->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);

 out:
	return rc;

 out_irq:
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

 out_dma:
	for (i = 0; i < priv->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				  priv->msg_tx_ring.virt_buffer[i],
				  priv->msg_tx_ring.phys_buffer[i]);

	return rc;
}

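/*
 * The "(get_bitmask_order(entries) - 2) << 12" writes above and in
 * rio_open_inb_mbox() encode the ring size in the unit's mode register:
 * entries must be a power of two between the RIO_MIN_*_RING_SIZE and
 * RIO_MAX_*_RING_SIZE limits (2..2048), which the open routines enforce.
 */
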
/**
 * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	/* Disable outbound message unit */
	out_be32(&priv->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
			  priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}

/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	isr = in_be32(&priv->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * We implement *only* mailbox 0, but can receive messages
		 * for any mailbox/letter to that mailbox destination. So,
		 * make the callback with an unknown/invalid mailbox number
		 * argument.
		 */
		port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);

		/* Ack the queueing interrupt */
		out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

 out:
	return IRQ_HANDLED;
}

/**
 * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes the buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	priv->msg_rx_ring.dev_id = dev_id;
	priv->msg_rx_ring.size = entries;
	priv->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < priv->msg_rx_ring.size; i++)
		priv->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	priv->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&priv->msg_rx_ring.phys, GFP_KERNEL);
	if (!priv->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
	out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		/* Free the inbound message ring allocated above */
		dma_free_coherent(priv->dev,
				  priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				  priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&priv->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&priv->msg_regs->imr, 0x1);

 out:
	return rc;
}

/**
 * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	/* Disable inbound message unit */
	out_be32(&priv->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}

/**
 * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct rio_priv *priv = mport->priv;

	pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 priv->msg_rx_ring.rx_slot);

	if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
		       "RIO: error adding inbound buffer %d, buffer exists\n",
		       priv->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
	if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
		priv->msg_rx_ring.rx_slot = 0;

 out:
	return rc;
}

EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);

/**
 * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	u32 phys_buf, virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&priv->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
		goto out2;

	virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
						   - priv->msg_rx_ring.phys);
	buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = priv->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
		       "RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;

 out1:
	setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);

 out2:
	return buf;
}

EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);

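/*
 * Typical inbound flow (a sketch, assuming a client that registered the
 * mailbox through the generic RapidIO layer): the client pre-posts
 * RIO_MAX_MSG_SIZE buffers with rio_hw_add_inb_buffer(), the rx interrupt
 * invokes its mailbox callback, and the callback drains the queue with
 * rio_hw_get_inb_message() until it returns NULL, re-posting each buffer
 * once it has been consumed.
 */
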
/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct rio_priv *priv = port->priv;

	dsr = in_be32(&priv->msg_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		u32 dmsg =
		    (u32) priv->dbell_ring.virt +
		    (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug
		    ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
		     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));

		list_for_each_entry(dbell, &port->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(dmsg)) &&
			    (dbell->res->end >= DBELL_INF(dmsg))) {
				found = 1;
				break;
			}
		}
		if (found) {
			dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
				    DBELL_INF(dmsg));
		} else {
			pr_debug
			    ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
			     DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
		}
		setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
	}

 out:
	return IRQ_HANDLED;
}

/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @mport: Master port implementing the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
static int fsl_rio_doorbell_init(struct rio_mport *mport)
{
	struct rio_priv *priv = mport->priv;
	int rc = 0;

	/* Map outbound doorbell window immediately after maintenance window */
	priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
				  RIO_DBELL_WIN_SIZE);
	if (!priv->dbell_win) {
		printk(KERN_ERR
		       "RIO: unable to map outbound doorbell window\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Initialize inbound doorbells */
	priv->dbell_ring.virt = dma_alloc_coherent(priv->dev, 512 *
				DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
	if (!priv->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		iounmap(priv->dbell_win);
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
	out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&priv->msg_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)mport);
	if (rc < 0) {
		iounmap(priv->dbell_win);
		dma_free_coherent(priv->dev, 512 * DOORBELL_MESSAGE_SIZE,
				  priv->dbell_ring.virt, priv->dbell_ring.phys);
		printk(KERN_ERR
		       "MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&priv->msg_regs->dmr, 0x00108161);

 out:
	return rc;
}

static char *cmdline = NULL;

static int fsl_rio_get_hdid(int index)
{
	/* XXX Need to parse multiple entries in some format */
	if (!cmdline)
		return -1;

	return simple_strtol(cmdline, NULL, 0);
}

static int fsl_rio_get_cmdline(char *s)
{
	if (!s)
		return 0;

	cmdline = s;
	return 1;
}

__setup("riohdid=", fsl_rio_get_cmdline);

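/*
 * The "riohdid=" kernel command line option supplies the host device ID for
 * this port; for example, booting with "riohdid=0" brings the board up as
 * host ID 0.  When the option is absent, fsl_rio_get_hdid() returns -1 and
 * the port is left without an assigned host ID.
 */
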
static inline void fsl_rio_info(struct device *dev, u32 ccsr)
{
	const char *str;
	if (ccsr & 1) {
		/* Serial phy */
		switch (ccsr >> 30) {
		case 0:
			str = "1";
			break;
		case 1:
			str = "4";
			break;
		default:
			str = "Unknown";
			break;
		}
		dev_info(dev, "Hardware port width: %s\n", str);

		switch ((ccsr >> 27) & 7) {
		case 0:
			str = "Single-lane 0";
			break;
		case 1:
			str = "Single-lane 2";
			break;
		case 2:
			str = "Four-lane";
			break;
		default:
			str = "Unknown";
			break;
		}
		dev_info(dev, "Training connection status: %s\n", str);
	} else {
		/* Parallel phy */
		if (!(ccsr & 0x80000000))
			dev_info(dev, "Output port operating in 8-bit mode\n");
		if (!(ccsr & 0x08000000))
			dev_info(dev, "Input port operating in 8-bit mode\n");
	}
}

/**
 * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface
 * @dev: of_device pointer
 *
 * Initializes MPC85xx RapidIO hardware interface, configures
 * master port with system-specific info, and registers the
 * master port with the RapidIO subsystem.
 */
int fsl_rio_setup(struct of_device *dev)
{
	struct rio_ops *ops;
	struct rio_mport *port;
	struct rio_priv *priv;
	int rc = 0;
	const u32 *dt_range, *cell;
	struct resource regs;
	int rlen;
	u32 ccsr;
	u64 law_start, law_size;
	int paw, aw, sw;

	if (!dev->node) {
		dev_err(&dev->dev, "Device OF-Node is NULL");
		return -EFAULT;
	}

	rc = of_address_to_resource(dev->node, 0, &regs);
	if (rc) {
		dev_err(&dev->dev, "Can't get %s property 'reg'\n",
			dev->node->full_name);
		return -EFAULT;
	}
	dev_info(&dev->dev, "Of-device full name %s\n", dev->node->full_name);
	dev_info(&dev->dev, "Regs: %pR\n", &regs);

	dt_range = of_get_property(dev->node, "ranges", &rlen);
	if (!dt_range) {
		dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
			dev->node->full_name);
		return -EFAULT;
	}

	/* Get node address width */
	cell = of_get_property(dev->node, "#address-cells", NULL);
	if (cell)
		aw = *cell;
	else
		aw = of_n_addr_cells(dev->node);
	/* Get node size width */
	cell = of_get_property(dev->node, "#size-cells", NULL);
	if (cell)
		sw = *cell;
	else
		sw = of_n_size_cells(dev->node);
	/* Get parent address width */
	paw = of_n_addr_cells(dev->node);

	law_start = of_read_number(dt_range + aw, paw);
	law_size = of_read_number(dt_range + aw + paw, sw);

	dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
		 law_start, law_size);

	ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
	if (!ops) {
		rc = -ENOMEM;
		goto err_ops;
	}
	ops->lcread = fsl_local_config_read;
	ops->lcwrite = fsl_local_config_write;
	ops->cread = fsl_rio_config_read;
	ops->cwrite = fsl_rio_config_write;
	ops->dsend = fsl_rio_doorbell_send;

	port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
	if (!port) {
		rc = -ENOMEM;
		goto err_port;
	}
	port->id = 0;
	port->index = 0;

	priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
	if (!priv) {
		printk(KERN_ERR "Can't alloc memory for 'priv'\n");
		rc = -ENOMEM;
		goto err_priv;
	}

	INIT_LIST_HEAD(&port->dbells);
	port->iores.start = law_start;
	port->iores.end = law_start + law_size - 1;
	port->iores.flags = IORESOURCE_MEM;
	port->iores.name = "rio_io_win";

	priv->bellirq = irq_of_parse_and_map(dev->node, 2);
	priv->txirq = irq_of_parse_and_map(dev->node, 3);
	priv->rxirq = irq_of_parse_and_map(dev->node, 4);
	dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq,
		 priv->txirq, priv->rxirq);

	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
	strcpy(port->name, "RIO0 mport");

	priv->dev = &dev->dev;

	port->ops = ops;
	port->host_deviceid = fsl_rio_get_hdid(port->id);

	port->priv = priv;
	rio_register_mport(port);

	priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);

	/* Probe the master port phy type */
	ccsr = in_be32(priv->regs_win + RIO_CCSR);
	port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
	dev_info(&dev->dev, "RapidIO PHY type: %s\n",
		 (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
		 ((port->phy_type == RIO_PHY_SERIAL) ? "serial" :
		  "unknown"));
	/* Check the port training status */
	if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
		dev_err(&dev->dev, "Port is not ready. "
			"Try to restart connection...\n");
		switch (port->phy_type) {
		case RIO_PHY_SERIAL:
			/* Disable ports */
			out_be32(priv->regs_win + RIO_CCSR, 0);
			/* Set 1x lane */
			setbits32(priv->regs_win + RIO_CCSR, 0x02000000);
			/* Enable ports */
			setbits32(priv->regs_win + RIO_CCSR, 0x00600000);
			break;
		case RIO_PHY_PARALLEL:
			/* Disable ports */
			out_be32(priv->regs_win + RIO_CCSR, 0x22000000);
			/* Enable ports */
			out_be32(priv->regs_win + RIO_CCSR, 0x44000000);
			break;
		}
		msleep(100);
		if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
			dev_err(&dev->dev, "Port restart failed.\n");
			rc = -ENOLINK;
			goto err;
		}
		dev_info(&dev->dev, "Port restart success!\n");
	}
	fsl_rio_info(&dev->dev, ccsr);

	port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
					& RIO_PEF_CTLS) >> 4;
	dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
		 port->sys_size ? 65536 : 256);

	priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
					+ RIO_ATMU_REGS_OFFSET);
	priv->maint_atmu_regs = priv->atmu_regs + 1;
	priv->dbell_atmu_regs = priv->atmu_regs + 2;
	priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win +
			((port->phy_type == RIO_PHY_SERIAL) ?
			RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET));

	/* Set to receive any destination ID for serial RapidIO controller. */
	if (port->phy_type == RIO_PHY_SERIAL)
		out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);

	/* Configure maintenance transaction window */
	out_be32(&priv->maint_atmu_regs->rowbar, law_start >> 12);
	out_be32(&priv->maint_atmu_regs->rowar, 0x80077015);	/* 4M */

	priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);

	/* Configure outbound doorbell window */
	out_be32(&priv->dbell_atmu_regs->rowbar,
		 (law_start + RIO_MAINT_WIN_SIZE) >> 12);
	out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b);	/* 4k */
	fsl_rio_doorbell_init(port);

	return 0;
err:
	iounmap(priv->regs_win);
	kfree(priv);
err_priv:
	kfree(port);
err_port:
	kfree(ops);
err_ops:
	return rc;
}

/* The probe function for RapidIO peer-to-peer network.
 */
static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev,
				     const struct of_device_id *match)
{
	int rc;
	printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
	       dev->node->full_name);

	rc = fsl_rio_setup(dev);
	if (rc)
		goto out;

	/* Enumerate all registered ports */
	rc = rio_init_mports();
out:
	return rc;
};

static const struct of_device_id fsl_of_rio_rpn_ids[] = {
	{
		.compatible = "fsl,rapidio-delta",
	},
	{},
};

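/*
 * Illustrative (board-specific) device tree fragment for a node matched by
 * the table above.  The "reg", "ranges" and "interrupts" properties are the
 * ones consumed by fsl_rio_setup(); the third, fourth and fifth interrupt
 * specifiers are picked up as bellirq/txirq/rxirq.  Addresses and interrupt
 * numbers below are placeholders, not values taken from a real board file:
 *
 *	rapidio@c0000 {
 *		compatible = "fsl,rapidio-delta";
 *		reg = <0xc0000 0x20000>;
 *		ranges = <0x0 0xc0000000 0x20000000>;
 *		interrupts = <... ... bell tx rx ...>;
 *	};
 */
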
static struct of_platform_driver fsl_of_rio_rpn_driver = {
	.name = "fsl-of-rio",
	.match_table = fsl_of_rio_rpn_ids,
	.probe = fsl_of_rio_rpn_probe,
};

static __init int fsl_of_rio_rpn_init(void)
{
	return of_register_platform_driver(&fsl_of_rio_rpn_driver);
}

subsys_initcall(fsl_of_rio_rpn_init);