/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void dmam_coherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
}

static void dmam_noncoherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}
52
53/**
54 * dmam_alloc_coherent - Managed dma_alloc_coherent()
55 * @dev: Device to allocate coherent memory for
56 * @size: Size of allocation
57 * @dma_handle: Out argument for allocated DMA handle
58 * @gfp: Allocation flags
59 *
60 * Managed dma_alloc_coherent(). Memory allocated using this function
61 * will be automatically released on driver detach.
62 *
63 * RETURNS:
64 * Pointer to allocated memory on success, NULL on failure.
65 */
Marius Cristian Eseanu6d42d792015-03-08 12:34:14 +020066void *dmam_alloc_coherent(struct device *dev, size_t size,
Tejun Heo9ac78492007-01-20 16:00:26 +090067 dma_addr_t *dma_handle, gfp_t gfp)
68{
69 struct dma_devres *dr;
70 void *vaddr;
71
72 dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
73 if (!dr)
74 return NULL;
75
76 vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
77 if (!vaddr) {
78 devres_free(dr);
79 return NULL;
80 }
81
82 dr->vaddr = vaddr;
83 dr->dma_handle = *dma_handle;
84 dr->size = size;
85
86 devres_add(dev, dr);
87
88 return vaddr;
89}
90EXPORT_SYMBOL(dmam_alloc_coherent);
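
/*
 * Example (hypothetical driver code, not part of this file): because the
 * allocation is tracked by devres, a probe routine needs no error-path or
 * remove-path dma_free_coherent() call; the buffer is released
 * automatically on driver detach.  foo_probe(), struct foo_priv and
 * FOO_RING_BYTES are made-up names used purely for illustration:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->ring = dmam_alloc_coherent(&pdev->dev, FOO_RING_BYTES,
 *						 &priv->ring_dma, GFP_KERNEL);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 */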

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
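
/*
 * Example (hypothetical, continuing the sketch above): a driver that must
 * drop the buffer before detach can free it early.  This both frees the
 * memory and removes the matching devres entry, so the buffer is not
 * freed a second time when the driver detaches:
 *
 *	dmam_free_coherent(&pdev->dev, FOO_RING_BYTES, priv->ring,
 *			   priv->ring_dma);
 */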

/**
 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_noncoherent().  Memory allocated using this
 * function will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_noncoherent(struct device *dev, size_t size,
			     dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_noncoherent);

/**
 * dmam_free_noncoherent - Managed dma_free_noncoherent()
 * @dev: Device to free noncoherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_noncoherent().
 */
void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			   dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_noncoherent(dev, size, vaddr, dma_handle);
	/*
	 * devres_destroy() returns 0 on success, so warn only when no
	 * matching devres entry was found, as dmam_free_coherent() does.
	 */
	WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_noncoherent);

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (rc) {
		devres_add(dev, res);
		rc = 0;
	} else {
		devres_free(res);
		rc = -ENOMEM;
	}

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
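
/*
 * Example (hypothetical): a platform driver with device-local SRAM can
 * declare that window so that later dma_alloc_coherent() calls for the
 * device are satisfied from it; being managed, the declaration is torn
 * down automatically on detach.  sram_phys, sram_bus and FOO_SRAM_SIZE
 * are made-up names:
 *
 *	rc = dmam_declare_coherent_memory(&pdev->dev, sram_phys, sram_bus,
 *					  FOO_SRAM_SIZE,
 *					  DMA_MEMORY_MAP |
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (rc)
 *		return rc;
 */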

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory()
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create a scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
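
/*
 * Example (hypothetical architecture code): on arches whose coherent
 * allocations are backed by ordinary, physically contiguous pages, the
 * dma_map_ops ->get_sgtable() method can be a thin wrapper around this
 * helper (the method itself takes an extra attrs argument):
 *
 *	static int foo_get_sgtable(struct device *dev, struct sg_table *sgt,
 *				   void *cpu_addr, dma_addr_t handle,
 *				   size_t size, unsigned long attrs)
 *	{
 *		return dma_common_get_sgtable(dev, sgt, cpu_addr, handle,
 *					      size);
 *	}
 */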

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
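
/*
 * Example (hypothetical): drivers normally do not call this helper
 * directly; they reach it through dma_mmap_coherent() from their own
 * mmap file operation.  foo_mmap() and struct foo_priv are made-up
 * names:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->ring,
 *					 priv->ring_dma, FOO_RING_BYTES);
 *	}
 */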

#ifdef CONFIG_MMU
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * remaps an array of PAGE_SIZE pages into another vm_area
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}

/*
 * remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}

/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
#endif
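
/*
 * Example (hypothetical): the remap helpers above pair with
 * dma_common_free_remap().  An arch dma_alloc path might remap a
 * contiguous allocation with non-cacheable attributes and tear the
 * mapping down again on free:
 *
 *	void *va = dma_common_contiguous_remap(page, size, VM_USERMAP,
 *					pgprot_noncached(PAGE_KERNEL),
 *					__builtin_return_address(0));
 *	...
 *	dma_common_free_remap(va, size, VM_USERMAP);
 */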

/*
 * Common configuration to enable DMA API use for a device
 */
int dma_configure(struct device *dev)
{
	struct device *bridge = NULL, *dma_dev = dev;
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev_is_pci(dev)) {
		bridge = pci_get_host_bridge_device(to_pci_dev(dev));
		dma_dev = bridge;
		if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
		    dma_dev->parent->of_node)
			dma_dev = dma_dev->parent;
	}

	if (dma_dev->of_node) {
		ret = of_dma_configure(dev, dma_dev->of_node);
	} else if (has_acpi_companion(dma_dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
		if (attr != DEV_DMA_NOT_SUPPORTED)
			ret = acpi_dma_configure(dev, attr);
	}

	if (bridge)
		pci_put_host_bridge_device(bridge);

	return ret;
}

void dma_deconfigure(struct device *dev)
{
	of_dma_deconfigure(dev);
	acpi_dma_deconfigure(dev);
}
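
/*
 * Example (simplified sketch of the caller, which lives in the driver
 * core rather than in individual drivers): bus code invokes these around
 * probe so that DMA parameters described by OF or ACPI are applied
 * before the driver runs and are dropped again if probe fails.
 * bus_probe_device_fn() is a made-up stand-in for the real probe call:
 *
 *	ret = dma_configure(dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = bus_probe_device_fn(dev);
 *	if (ret)
 *		dma_deconfigure(dev);
 */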