/*
 * Common CPM code
 *
 * Author: Scott Wood <scottwood@freescale.com>
 *
 * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
 *
 * Some parts derived from commproc.c/cpm2_common.c, which is:
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
 * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
 * 2006 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/rheap.h>
#include <asm/cpm.h>

#include <mm/mmu_decl.h>

#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
#include <linux/of_gpio.h>
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
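/*
 * CONFIG_PPC_EARLY_DEBUG_CPM_ADDR points at a CPM transmit buffer
 * descriptor: word 0 holds the status/control bits in its upper half
 * (0x8000 = ready, 0x2000 = wrap) and the byte count in its lower half,
 * while word 1 holds the buffer address.  udbg_putc_cpm() below spins
 * until the ready bit clears, stores one character in the buffer and
 * then writes 0xa0000001 (ready | wrap, length 1) to start transmission.
 */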
static u32 __iomem *cpm_udbg_txdesc =
	(u32 __iomem __force *)CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;

static void udbg_putc_cpm(char c)
{
	u8 __iomem *txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);

	if (c == '\n')
		udbg_putc_cpm('\r');

	while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
		;

	out_8(txbuf, c);
	out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
}

void __init udbg_init_cpm(void)
{
	if (cpm_udbg_txdesc) {
#ifdef CONFIG_CPM2
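		/*
		 * Map the I/O region that holds the descriptor and its
		 * buffer with a BAT (non-cached, guarded) so they are
		 * reachable this early in boot.
		 */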
		setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
#endif
		udbg_putc = udbg_putc_cpm;
	}
}
#endif

static spinlock_t cpm_muram_lock;
static rh_block_t cpm_boot_muram_rh_block[16];
static rh_info_t cpm_muram_info;
static u8 __iomem *muram_vbase;
static phys_addr_t muram_pbase;

/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4

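/**
 * cpm_muram_init - initialize the multi-user ram (muram) allocator
 *
 * Locates the muram node in the device tree ("fsl,cpm-muram-data", or the
 * legacy "data-only" node), attaches each of its address ranges to the
 * rheap allocator and maps the whole area.  Safe to call more than once;
 * subsequent calls return 0 without doing anything.
 *
 * Returns 0 on success or a negative error code on failure.
 */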
int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	u32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	spin_lock_init(&cpm_muram_lock);
	/* initialize the info header */
	rh_init(&cpm_muram_info, 1,
		sizeof(cpm_boot_muram_rh_block) /
		sizeof(cpm_boot_muram_rh_block[0]),
		cpm_boot_muram_rh_block);

	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			printk(KERN_ERR "Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out;
		}
	}

	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		printk(KERN_ERR "Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;

		rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
				 resource_size(&r));
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		printk(KERN_ERR "Cannot map CPM muram\n");
		ret = -ENOMEM;
	}

out:
	of_node_put(np);
	return ret;
}

/**
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns an offset into the muram area.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	cpm_muram_info.alignment = align;
	start = rh_alloc(&cpm_muram_info, size, "commproc");
	memset(cpm_muram_addr(start), 0, size);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 */
int cpm_muram_free(unsigned long offset)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	ret = rh_free(&cpm_muram_info, offset);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return ret;
}
EXPORT_SYMBOL(cpm_muram_free);

/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: the offset into the muram area to reserve
 * @size: the number of bytes to reserve
 *
 * This function returns the start of the reserved region on success,
 * or -ENOMEM on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	unsigned long start;
	unsigned long flags;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	cpm_muram_info.alignment = 1;
	start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
	spin_unlock_irqrestore(&cpm_muram_lock, flags);

	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

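/**
 * cpm_muram_offset - turn a muram virtual address back into a muram offset
 * @addr: virtual address from cpm_muram_addr() to convert
 */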
unsigned long cpm_muram_offset(void __iomem *addr)
{
	return addr - (void __iomem *)muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from cpm_muram_addr() to convert
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);

#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)

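/*
 * Layout of one 32-bit CPM2 parallel I/O port: direction, pin assignment,
 * special options, open-drain and data registers, followed by reserved
 * words that pad each port to a fixed stride.
 */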
struct cpm2_ioports {
	u32 dir, par, sor, odr, dat;
	u32 res[3];
};

struct cpm2_gpio32_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;

	/* shadowed data register to clear/set bits safely */
	u32 cpdata;
};

static inline struct cpm2_gpio32_chip *
to_cpm2_gpio32_chip(struct of_mm_gpio_chip *mm_gc)
{
	return container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
}

static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;

	cpm2_gc->cpdata = in_be32(&iop->dat);
}

static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	u32 pin_mask;

	pin_mask = 1 << (31 - gpio);

	return !!(in_be32(&iop->dat) & pin_mask);
}

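/* Must be called with cpm2_gc->lock held: updates the cpdata shadow. */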
static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
	int value)
{
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;

	if (value)
		cpm2_gc->cpdata |= pin_mask;
	else
		cpm2_gc->cpdata &= ~pin_mask;

	out_be32(&iop->dat, cpm2_gc->cpdata);
}

static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	__cpm2_gpio32_set(mm_gc, pin_mask, value);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);
}

static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	setbits32(&iop->dir, pin_mask);
	__cpm2_gpio32_set(mm_gc, pin_mask, val);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);

	return 0;
}

static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct cpm2_gpio32_chip *cpm2_gc = to_cpm2_gpio32_chip(mm_gc);
	struct cpm2_ioports __iomem *iop = mm_gc->regs;
	unsigned long flags;
	u32 pin_mask = 1 << (31 - gpio);

	spin_lock_irqsave(&cpm2_gc->lock, flags);

	clrbits32(&iop->dir, pin_mask);

	spin_unlock_irqrestore(&cpm2_gc->lock, flags);

	return 0;
}

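/**
 * cpm2_gpiochip_add32 - register one 32-bit I/O port as a gpio_chip
 * @np: device tree node describing the port
 *
 * Returns 0 on success or a negative error code on failure.
 */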
int cpm2_gpiochip_add32(struct device_node *np)
{
	struct cpm2_gpio32_chip *cpm2_gc;
	struct of_mm_gpio_chip *mm_gc;
	struct gpio_chip *gc;

	cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL);
	if (!cpm2_gc)
		return -ENOMEM;

	spin_lock_init(&cpm2_gc->lock);

	mm_gc = &cpm2_gc->mm_gc;
	gc = &mm_gc->gc;

	mm_gc->save_regs = cpm2_gpio32_save_regs;
	gc->ngpio = 32;
	gc->direction_input = cpm2_gpio32_dir_in;
	gc->direction_output = cpm2_gpio32_dir_out;
	gc->get = cpm2_gpio32_get;
	gc->set = cpm2_gpio32_set;

	return of_mm_gpiochip_add(np, mm_gc);
}
#endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */