/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
 * the kernel's original.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <linux/cache.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

#include <dma-coherence.h>

#ifdef CONFIG_PCI
#include <asm/octeon/pci-octeon.h>
#endif

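/*
 * PCI bus address at which BAR2 maps all of Octeon physical memory at a
 * fixed offset. Devices whose DMA mask reaches above this address can use
 * BAR2 directly (see the OCTEON_DMA_BAR_TYPE_BIG/SMALL cases below).
 */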
#define BAR2_PCI_ADDRESS 0x8000000000ul

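/*
 * Software state for one BAR1 index register. Each index maps a 4MB
 * (1 << 22 byte) window of Octeon physical memory into PCI space and is
 * reference counted so concurrent mappings can share a window.
 */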
struct bar1_index_state {
	int16_t ref_count;     /* Number of PCI mappings using this index */
	uint16_t address_bits; /* Upper bits of physical address. This is
				  shifted 22 bits */
};

#ifdef CONFIG_PCI
static DEFINE_RAW_SPINLOCK(bar1_lock);
static struct bar1_index_state bar1_state[32];
#endif

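/*
 * Map a kernel virtual address so a PCI/PCIe device can DMA to it.
 * Returns the bus address the device should use: either a fixed BAR0/BAR2
 * window or a dynamically allocated BAR1 index, depending on
 * octeon_dma_bar_type and the device's DMA mask.
 */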
dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
{
#ifndef CONFIG_PCI
	/*
	 * Without PCI/PCIe this function can be called for Octeon internal
	 * devices such as USB. These devices all support 64bit addressing.
	 */
	mb();
	return virt_to_phys(ptr);
#else
	unsigned long flags;
	uint64_t dma_mask;
	int64_t start_index;
	dma_addr_t result = -1;
	uint64_t physical = virt_to_phys(ptr);
	int64_t index;

	mb();
	/*
	 * Use the DMA masks to determine the allowed memory
	 * region. For us it doesn't limit the actual memory, just the
	 * address visible over PCI. Devices with limits need to use
	 * lower indexed Bar1 entries.
	 */
	if (dev) {
		dma_mask = dev->coherent_dma_mask;
		if (dev->dma_mask)
			dma_mask = *dev->dma_mask;
	} else {
		dma_mask = 0xfffffffful;
	}

	/*
	 * Platform devices, such as the internal USB, skip all
	 * translation and use Octeon physical addresses directly.
	 */
	if (!dev || dev->bus == &platform_bus_type)
		return physical;

	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		if (unlikely(physical < (16ul << 10)))
			panic("dma_map_single: Not allowed to map first 16KB."
			      " It interferes with BAR0 special area\n");
		else if ((physical + size >= (256ul << 20)) &&
			 (physical < (512ul << 20)))
			panic("dma_map_single: Not allowed to map bootbus\n");
		else if ((physical + size >= 0x400000000ull) &&
			 physical < 0x410000000ull)
			panic("dma_map_single: "
			      "Attempt to map illegal memory address 0x%llx\n",
			      physical);
		else if (physical >= 0x420000000ull)
			panic("dma_map_single: "
			      "Attempt to map illegal memory address 0x%llx\n",
			      physical);
		else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
			 physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
			result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;

			if (((result + size - 1) & dma_mask) != result + size - 1)
				panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
				      physical, physical + size - 1, dma_mask);
			goto done;
		}

		/* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
		if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
			result = physical - 0x400000000ull;
		else
			result = physical;
		if (((result + size - 1) & dma_mask) != result + size - 1)
			panic("dma_map_single: Attempt to map address "
			      "0x%llx-0x%llx, which can't be accessed "
			      "according to the dma mask 0x%llx\n",
			      physical, physical + size - 1, dma_mask);
		goto done;

	case OCTEON_DMA_BAR_TYPE_BIG:
#ifdef CONFIG_64BIT
		/* If the device supports 64bit addressing, then use BAR2 */
		if (dma_mask > BAR2_PCI_ADDRESS) {
			result = physical + BAR2_PCI_ADDRESS;
			goto done;
		}
#endif
		if (unlikely(physical < (4ul << 10))) {
			panic("dma_map_single: Not allowed to map first 4KB. "
			      "It interferes with BAR0 special area\n");
		} else if (physical < (256ul << 20)) {
			if (unlikely(physical + size > (256ul << 20)))
				panic("dma_map_single: Requested memory spans "
				      "Bar0 0:256MB and bootbus\n");
			result = physical;
			goto done;
		} else if (unlikely(physical < (512ul << 20))) {
			panic("dma_map_single: Not allowed to map bootbus\n");
		} else if (physical < (2ul << 30)) {
			if (unlikely(physical + size > (2ul << 30)))
				panic("dma_map_single: Requested memory spans "
				      "Bar0 512MB:2GB and BAR1\n");
			result = physical;
			goto done;
		} else if (physical < (2ul << 30) + (128 << 20)) {
			/* Fall through */
		} else if (physical <
			   (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
			if (unlikely
			    (physical + size >
			     (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
				panic("dma_map_single: Requested memory "
				      "extends past Bar1 (4GB-%luMB)\n",
				      OCTEON_PCI_BAR1_HOLE_SIZE);
			result = physical;
			goto done;
		} else if ((physical >= 0x410000000ull) &&
			   (physical < 0x420000000ull)) {
			if (unlikely(physical + size > 0x420000000ull))
				panic("dma_map_single: Requested memory spans "
				      "non-existent memory\n");
			/*
			 * BAR0 fixed mapping 256MB:512MB ->
			 * 16GB+256MB:16GB+512MB
			 */
			result = physical - 0x400000000ull;
			goto done;
		} else {
			/* Continued below switch statement */
		}
		break;

	case OCTEON_DMA_BAR_TYPE_SMALL:
#ifdef CONFIG_64BIT
		/* If the device supports 64bit addressing, then use BAR2 */
		if (dma_mask > BAR2_PCI_ADDRESS) {
			result = physical + BAR2_PCI_ADDRESS;
			goto done;
		}
#endif
		/* Continued below switch statement */
		break;

	default:
		panic("dma_map_single: Invalid octeon_dma_bar_type\n");
	}

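	/*
	 * If we get here the address has to go through a dynamically
	 * programmed BAR1 index window.
	 */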
	/*
	 * Don't allow a mapping to span multiple BAR1 entries. The hardware
	 * guys won't guarantee that DMA spanning entries works.
	 */
	if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
		panic("dma_map_single: "
		      "Requested memory spans more than one Bar1 entry\n");

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
		start_index = 31;
	else if (unlikely(dma_mask < (1ul << 27)))
		start_index = (dma_mask >> 22);
	else
		start_index = 31;

	/* Only one processor can access the Bar register at once */
	raw_spin_lock_irqsave(&bar1_lock, flags);

	/* Look through Bar1 for existing mapping that will work */
	for (index = start_index; index >= 0; index--) {
		if ((bar1_state[index].address_bits == physical >> 22) &&
		    (bar1_state[index].ref_count)) {
			/* An existing mapping will work, use it */
			bar1_state[index].ref_count++;
			if (unlikely(bar1_state[index].ref_count < 0))
				panic("dma_map_single: "
				      "Bar1[%d] reference count overflowed\n",
				      (int) index);
			result = (index << 22) | (physical & ((1 << 22) - 1));
			/* Large BAR1 is offset at 2GB */
			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
				result += 2ul << 30;
			goto done_unlock;
		}
	}

	/* No existing mappings, look for a free entry */
	for (index = start_index; index >= 0; index--) {
		if (unlikely(bar1_state[index].ref_count == 0)) {
			union cvmx_pci_bar1_indexx bar1_index;
			/* We have a free entry, use it */
			bar1_state[index].ref_count = 1;
			bar1_state[index].address_bits = physical >> 22;
			bar1_index.u32 = 0;
			/* Address bits[35:22] sent to L2C */
			bar1_index.s.addr_idx = physical >> 22;
			/* Don't put PCI accesses in L2. */
			bar1_index.s.ca = 1;
			/* Endian Swap Mode */
			bar1_index.s.end_swp = 1;
			/* Set '1' when the selected address range is valid. */
			bar1_index.s.addr_v = 1;
			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
					   bar1_index.u32);
			/* The new mapping is programmed, use it */
			result = (index << 22) | (physical & ((1 << 22) - 1));
			/* Large BAR1 is offset at 2GB */
			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
				result += 2ul << 30;
			goto done_unlock;
		}
	}

	pr_err("dma_map_single: "
	       "Can't find empty BAR1 index for physical mapping 0x%llx\n",
	       (unsigned long long) physical);

done_unlock:
	raw_spin_unlock_irqrestore(&bar1_lock, flags);
done:
	pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
	return result;
#endif
}

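/*
 * Undo a mapping created by octeon_map_dma_mem(). Fixed BAR0/BAR2 mappings
 * need no teardown; addresses that used a BAR1 index drop their reference
 * and free the index when the count reaches zero.
 */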
void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
#ifndef CONFIG_PCI
	/*
	 * Without PCI/PCIe this function can be called for Octeon internal
	 * devices such as USB. These devices all support 64bit addressing.
	 */
	return;
#else
	unsigned long flags;
	uint64_t index;

	/*
	 * Platform devices, such as the internal USB, skip all
	 * translation and use Octeon physical addresses directly.
	 */
	if (dev->bus == &platform_bus_type)
		return;

	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		/* Nothing to do, all mappings are static */
		goto done;

	case OCTEON_DMA_BAR_TYPE_BIG:
#ifdef CONFIG_64BIT
		/* Nothing to do for addresses using BAR2 */
		if (dma_addr >= BAR2_PCI_ADDRESS)
			goto done;
#endif
		if (unlikely(dma_addr < (4ul << 10)))
			panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
			      dma_addr);
		else if (dma_addr < (2ul << 30))
			/* Nothing to do for addresses using BAR0 */
			goto done;
		else if (dma_addr < (2ul << 30) + (128ul << 20))
			/* Need to unmap, fall through */
			index = (dma_addr - (2ul << 30)) >> 22;
		else if (dma_addr <
			 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
			goto done; /* Nothing to do for the rest of BAR1 */
		else
			panic("dma_unmap_single: Unexpected DMA address 0x%llx\n",
			      dma_addr);
		/* Continued below switch statement */
		break;

	case OCTEON_DMA_BAR_TYPE_SMALL:
#ifdef CONFIG_64BIT
		/* Nothing to do for addresses using BAR2 */
		if (dma_addr >= BAR2_PCI_ADDRESS)
			goto done;
#endif
		index = dma_addr >> 22;
		/* Continued below switch statement */
		break;

	default:
		panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
	}

	if (unlikely(index > 31))
		panic("dma_unmap_single: "
		      "Attempt to unmap an invalid address (0x%llx)\n",
		      dma_addr);

	raw_spin_lock_irqsave(&bar1_lock, flags);
	bar1_state[index].ref_count--;
	if (bar1_state[index].ref_count == 0)
		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
	else if (unlikely(bar1_state[index].ref_count < 0))
| 335 | panic("dma_unmap_single: Bar1[%u] reference count < 0\n", |
| 336 | (int) index); |
	raw_spin_unlock_irqrestore(&bar1_lock, flags);
done:
	pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
	return;
#endif
}