/*
 * lib/dma-virt.c
 *
 * DMA operations that map to virtual addresses without flushing memory.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
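
/*
 * Allocate page-aligned memory and report the buffer's kernel virtual
 * address as its DMA address.
 */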
static void *dma_virt_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t gfp,
                            unsigned long attrs)
{
        void *ret;

        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret)
                *dma_handle = (uintptr_t)ret;
        return ret;
}
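
/* Free a buffer that was allocated by dma_virt_alloc(). */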
static void dma_virt_free(struct device *dev, size_t size,
                          void *cpu_addr, dma_addr_t dma_addr,
                          unsigned long attrs)
{
        free_pages((unsigned long)cpu_addr, get_order(size));
}
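
/*
 * "Map" a page by returning its kernel virtual address plus the offset.
 * page_address() requires a page with a kernel mapping, so this assumes
 * no highmem pages are passed in.
 */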
static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir,
                                    unsigned long attrs)
{
        return (uintptr_t)(page_address(page) + offset);
}
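
/*
 * "Map" a scatterlist by storing each entry's kernel virtual address as
 * its DMA address. The mapping cannot fail, so all nents are returned.
 */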
static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
                           int nents, enum dma_data_direction dir,
                           unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg_page(sg));
                sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
                sg_dma_len(sg) = sg->length;
        }

        return nents;
}
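
/* None of the mapping operations above can fail, so no address is an error. */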
static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return false;
}
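
/* No address translation is performed, so any DMA mask is acceptable. */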
static int dma_virt_supported(struct device *dev, u64 mask)
{
        return true;
}
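
/*
 * Operation table for devices that process data through kernel virtual
 * addresses and never perform real DMA.
 */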
const struct dma_map_ops dma_virt_ops = {
        .alloc = dma_virt_alloc,
        .free = dma_virt_free,
        .map_page = dma_virt_map_page,
        .map_sg = dma_virt_map_sg,
        .mapping_error = dma_virt_mapping_error,
        .dma_supported = dma_virt_supported,
};
EXPORT_SYMBOL(dma_virt_ops);
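
/*
 * Usage sketch (an illustration, not part of the original file): a
 * driver whose "DMA" is only ever CPU access to kernel memory can
 * install these ops on its device, assuming a kernel where struct
 * device carries a dma_ops pointer:
 *
 *	dev->dma_ops = &dma_virt_ops;
 *
 * After that, dma_map_single(dev, buf, size, dir) and friends simply
 * return the buffer's kernel virtual address cast to dma_addr_t.
 */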