/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
 * a description of how these routines should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function. Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (mask < 0x7fffffff)
		return 0;
	return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	if (!sn_dma_supported(dev, dma_mask))
		return 0;

	*dev->dma_mask = dma_mask;
	return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
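
/*
 * Illustrative sketch (not part of this file): a driver on SN normally
 * exercises the two routines above indirectly, through the generic DMA
 * API; dma_set_mask() on a PCI device ends up consulting
 * sn_dma_supported() via the registered dma_map_ops.  The probe function
 * below is hypothetical.
 *
 *	static int mydrv_probe(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *		    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 */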

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller). See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flags)
{
	void *cpuaddr;
	unsigned long phys_addr;
	int node;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Allocate the memory.
	 */
	node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
		struct page *p = alloc_pages_exact_node(node,
							flags, get_order(size));

		if (likely(p))
			cpuaddr = page_address(p);
		else
			return NULL;
	} else
		cpuaddr = (void *)__get_free_pages(flags, get_order(size));

	if (unlikely(!cpuaddr))
		return NULL;

	memset(cpuaddr, 0x0, size);

	/* physical addr. of the memory we just got */
	phys_addr = __pa(cpuaddr);

	/*
	 * 64 bit address translations should never fail.
	 * 32 bit translations can fail if there are insufficient mapping
	 * resources.
	 */

	*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
						   SN_DMA_ADDR_PHYS);
	if (!*dma_handle) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		free_pages((unsigned long)cpuaddr, get_order(size));
		return NULL;
	}

	return cpuaddr;
}
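
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the usual path into sn_dma_alloc_coherent() is dma_alloc_coherent(),
 * e.g. to set up a controller's command ring.  The ring structure,
 * register offset and mmio pointer below are made up.
 *
 *	struct my_ring *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, sizeof(*ring),
 *				  &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writeq(ring_dma, mmio + MY_RING_BASE);
 *	...
 *	dma_free_coherent(&pdev->dev, sizeof(*ring), ring, ring_dma);
 */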

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
				 dma_addr_t dma_handle)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_handle, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page to map
 * @offset: offset into @page at which the region starts
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region of @page starting at @offset for DMA and return the
 * DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save the dmamap handle so we can use the two
 *       step interface.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	void *cpu_addr = page_address(page) + offset;
	dma_addr_t dma_addr;
	unsigned long phys_addr;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	phys_addr = __pa(cpu_addr);
	if (dmabarr)
		dma_addr = provider->dma_map_consistent(pdev, phys_addr,
							size, SN_DMA_ADDR_PHYS);
	else
		dma_addr = provider->dma_map(pdev, phys_addr, size,
					     SN_DMA_ADDR_PHYS);

	if (!dma_addr) {
		printk(KERN_ERR "%s: out of ATEs\n", __func__);
		return 0;
	}
	return dma_addr;
}
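
/*
 * Illustrative sketch (not part of this file): a streaming mapping that
 * reaches sn_dma_map_page() above, requesting the Altix write-barrier
 * semantics described in the comment.  The buffer, length and direction
 * are hypothetical.
 *
 *	struct dma_attrs attrs;
 *	dma_addr_t busaddr;
 *
 *	init_dma_attrs(&attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	busaddr = dma_map_single_attrs(&pdev->dev, buf, len,
 *				       DMA_FROM_DEVICE, &attrs);
 *	if (dma_mapping_error(&pdev->dev, busaddr))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single_attrs(&pdev->dev, busaddr, len,
 *			       DMA_FROM_DEVICE, &attrs);
 */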

/**
 * sn_dma_unmap_page - unmap a DMA mapped page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain. On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	BUG_ON(dev->bus != &pci_bus_type);

	provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
			    int nhwentries, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	struct scatterlist *sg;

	BUG_ON(dev->bus != &pci_bus_type);

	for_each_sg(sgl, sg, nhwentries, i) {
		provider->dma_unmap(pdev, sg->dma_address, dir);
		sg->dma_address = (dma_addr_t) NULL;
		sg->dma_length = 0;
	}
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: direction of the DMA transaction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute get mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
			 int nhwentries, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long phys_addr;
	struct scatterlist *saved_sg = sgl, *sg;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int i;
	int dmabarr;

	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

	BUG_ON(dev->bus != &pci_bus_type);

	/*
	 * Setup a DMA address for each entry in the scatterlist.
	 */
	for_each_sg(sgl, sg, nhwentries, i) {
		dma_addr_t dma_addr;
		phys_addr = SG_ENT_PHYS_ADDRESS(sg);
		if (dmabarr)
			dma_addr = provider->dma_map_consistent(pdev,
								phys_addr,
								sg->length,
								SN_DMA_ADDR_PHYS);
		else
			dma_addr = provider->dma_map(pdev, phys_addr,
						     sg->length,
						     SN_DMA_ADDR_PHYS);

		sg->dma_address = dma_addr;
		if (!sg->dma_address) {
			printk(KERN_ERR "%s: out of ATEs\n", __func__);

			/*
			 * Free any successfully allocated entries.
			 */
			if (i > 0)
				sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
			return 0;
		}

		sg->dma_length = sg->length;
	}

	return nhwentries;
}
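
/*
 * Illustrative sketch (hypothetical, not part of this file): scatterlist
 * mapping as a block or network driver might do it.  Each successfully
 * mapped entry is handed to the hardware via sg_dma_address() and
 * sg_dma_len(); hw_fill_descriptor() is a made-up helper.
 *
 *	int i, count;
 *	struct scatterlist *sg;
 *
 *	count = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, count, i)
 *		hw_fill_descriptor(i, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 */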

/*
 * SN platforms are fully cache coherent, so the streaming sync operations
 * below have nothing to do beyond sanity checking the bus type.
 */
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				       size_t size, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
					  size_t size,
					  enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				   int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				      int nelems, enum dma_data_direction dir)
{
	BUG_ON(dev->bus != &pci_bus_type);
}

static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
	return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
	if (!SN_PCIBUS_BUSSOFT(bus))
		return ERR_PTR(-ENODEV);

	return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	unsigned long addr;
	int ret;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level. SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 0, /* read */
		 port, size, __pa(val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs). This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus))
		return -ENODEV;

	addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	addr += port;

	ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

	if (ret == 2)
		return -EINVAL;

	if (ret == 1)
		*val = -1;

	return size;
}

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;
	unsigned long paddr;
	unsigned long *addr;
	struct ia64_sal_retval isrv;

	/*
	 * First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
	 * around hw issues at the pci bus level. SGI proms older than
	 * 4.10 don't implement this.
	 */

	SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
		 pci_domain_nr(bus), bus->number,
		 0, /* io */
		 1, /* write */
		 port, size, __pa(&val));

	if (isrv.status == 0)
		return size;

	/*
	 * If the above failed, retry using the SAL_PROBE call which should
	 * be present in all proms (but which cannot work round PCI chipset
	 * bugs). This code is retained for compatibility with old
	 * pre-4.10 proms, and should be removed at some point in the future.
	 */

	if (!SN_PCIBUS_BUSSOFT(bus)) {
		ret = -ENODEV;
		goto out;
	}

	/* Put the phys addr in uncached space */
	paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
	paddr += port;
	addr = (unsigned long *)paddr;

	switch (size) {
	case 1:
		*(volatile u8 *)(addr) = (u8)(val);
		break;
	case 2:
		*(volatile u16 *)(addr) = (u16)(val);
		break;
	case 4:
		*(volatile u32 *)(addr) = (u32)(val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static struct dma_map_ops sn_dma_ops = {
	.alloc_coherent = sn_dma_alloc_coherent,
	.free_coherent = sn_dma_free_coherent,
	.map_page = sn_dma_map_page,
	.unmap_page = sn_dma_unmap_page,
	.map_sg = sn_dma_map_sg,
	.unmap_sg = sn_dma_unmap_sg,
	.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
	.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
	.sync_single_for_device = sn_dma_sync_single_for_device,
	.sync_sg_for_device = sn_dma_sync_sg_for_device,
	.mapping_error = sn_dma_mapping_error,
	.dma_supported = sn_dma_supported,
};

void sn_dma_init(void)
{
	dma_ops = &sn_dma_ops;
}
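
/*
 * Illustrative sketch (not part of this file): once sn_dma_init() has
 * installed sn_dma_ops, the generic DMA API dispatches through it.
 * Roughly (modulo the exact ia64 wrapper details, which may differ):
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *	dma_addr = ops->map_page(dev, virt_to_page(ptr),
 *				 offset_in_page(ptr), size, dir, NULL);
 *
 * i.e. a dma_map_single() call on an SN2 PCI device ends up in
 * sn_dma_map_page() above.
 */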