/*
 * lib/dma-virt.c
 *
 * DMA operations that map to virtual addresses without flushing memory.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

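/*
 * "Coherent" allocation: back the buffer with ordinary pages and report
 * the buffer's kernel virtual address as its DMA handle.
 */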
static void *dma_virt_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	void *ret;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret)
		*dma_handle = (uintptr_t)ret;
	return ret;
}

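/* Free a buffer allocated by dma_virt_alloc(). */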
static void dma_virt_free(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
	free_pages((unsigned long)cpu_addr, get_order(size));
}

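/*
 * Mapping a page is a no-op: the "DMA address" is simply the page's
 * kernel virtual address plus the offset.
 */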
static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	return (uintptr_t)(page_address(page) + offset);
}

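/*
 * Map a scatterlist by storing each entry's kernel virtual address as
 * its DMA address; no IOMMU setup or cache maintenance takes place.
 */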
static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

const struct dma_map_ops dma_virt_ops = {
	.alloc			= dma_virt_alloc,
	.free			= dma_virt_free,
	.map_page		= dma_virt_map_page,
	.map_sg			= dma_virt_map_sg,
};
EXPORT_SYMBOL(dma_virt_ops);
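
/*
 * Usage sketch (illustrative, not part of this file): a driver whose
 * device accesses memory through the CPU rather than as a bus master
 * can install these ops on its struct device. This assumes a tree,
 * like the one this file belongs to, where struct device carries a
 * dma_ops pointer:
 *
 *	dev->dma_ops = &dma_virt_ops;
 *
 * Subsequent dma_map_single()/dma_map_sg() calls on that device then
 * return kernel virtual addresses cast to dma_addr_t instead of bus
 * addresses.
 */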