/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

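/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): for a pool
 * whose pfn_base is 0x80100, attached to a device carrying
 * dma_pfn_offset == 0x80000, the base address the device should be
 * programmed with is
 *
 *	(0x80100 - 0x80000) << 12 == 0x100000
 *
 * i.e. the pool's CPU PFN rebased by the device's DMA offset, rather
 * than the device_base recorded at declaration time.
 */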

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
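
/*
 * Usage sketch (hypothetical driver, not part of this file): a driver
 * with a dedicated on-chip SRAM window might declare it at probe time:
 *
 *	if (dma_declare_coherent_memory(dev, sram_phys, sram_dma, sram_size,
 *					DMA_MEMORY_MAP |
 *					DMA_MEMORY_EXCLUSIVE) == 0)
 *		return -ENXIO;
 *
 * sram_phys, sram_dma and sram_size are placeholder names. A non-zero
 * return reports the mapping method chosen (DMA_MEMORY_MAP or
 * DMA_MEMORY_IO); 0 means the declaration failed. DMA_MEMORY_EXCLUSIVE
 * additionally keeps dma_alloc_coherent() from falling back to generic
 * memory when the pool is exhausted.
 */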

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
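
/*
 * Usage sketch (hypothetical, placeholder names): a driver can pin down
 * a fixed window of the declared pool, e.g. a mailbox the hardware
 * expects at a known device address:
 *
 *	void *mbox = dma_mark_declared_memory_occupied(dev, MBOX_DMA_ADDR,
 *						       MBOX_SIZE);
 *	if (IS_ERR(mbox))
 *		return PTR_ERR(mbox);
 *
 * The marked region is then never handed out by dma_alloc_from_coherent().
 */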

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev: device from which we allocate memory
 * @size: size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret: This pointer will be filled with the virtual address
 *	 of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	if (dma_memory_map)
		memset(*ret, 0, size);
	else
		memset_io(*ret, 0, size);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
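
/*
 * Integration sketch (illustrative, generic names; not a definitive
 * arch implementation): a per-arch dma_alloc_coherent() consults the
 * per-device pool before touching the page allocator:
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, handle, &ret))
 *			return ret;
 *
 *		return arch_alloc_from_generic_pool(dev, size, handle, gfp);
 *	}
 *
 * arch_alloc_from_generic_pool() is a stand-in for whatever fallback
 * the architecture uses. Note that for a DMA_MEMORY_EXCLUSIVE pool
 * that is exhausted, the helper returns non-zero with @ret left NULL,
 * so the caller reports an allocation failure instead of falling back.
 */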

/**
 * dma_release_from_coherent() - try to free the memory allocated from the per-device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
	    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
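
/*
 * The matching per-arch free path (illustrative, generic names) tries
 * the pool first and only then frees generically:
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 *			   dma_addr_t handle)
 *	{
 *		if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
 *			return;
 *		arch_free_to_generic_pool(dev, size, cpu_addr, handle);
 *	}
 *
 * arch_free_to_generic_pool() is again a placeholder for the
 * architecture's generic free routine.
 */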

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev: device from which the memory was allocated
 * @vma: vm_area for the userspace memory
 * @vaddr: cpu address returned by dma_alloc_from_coherent
 * @size: size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret: result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
	    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
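
/*
 * Illustrative caller (generic names): an arch dma_mmap implementation
 * would typically try the per-device pool before remapping a generic
 * allocation:
 *
 *	int err;
 *
 *	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &err))
 *		return err;
 *
 * and only on a 0 return fall through to mapping the generic allocation.
 */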

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
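
/*
 * Device tree sketch (illustrative; see the reserved-memory binding
 * documentation for the authoritative format). A reserved-memory node
 * compatible with "shared-dma-pool" and marked no-map becomes such a
 * pool; the base address and node name below are made up:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma_pool@48000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x48000000 0x100000>;
 *			no-map;
 *		};
 *	};
 *
 * A device then references the pool via memory-region = <&dma_pool>;
 * rmem_dma_device_init() attaches it when the device is bound.
 */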
#endif