/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>
#include <mach/iomap.h>

#include "apbio.h"

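/*
 * Access APB registers through the APB DMA engine instead of plain CPU
 * readl()/writel(), presumably to avoid contention between the CPU and
 * the APB DMA controller on the APB bus (this file does not state the
 * hardware rationale).  All transfers share one DMA channel and a single
 * 32-bit coherent bounce buffer, serialised by tegra_apb_dma_lock; when
 * the DMA path cannot be set up, the helpers fall back to direct MMIO.
 */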
static DEFINE_MUTEX(tegra_apb_dma_lock);

static struct tegra_dma_channel *tegra_apb_dma;
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

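/*
 * Lazily allocate the shared DMA channel and the one-word bounce buffer.
 * Safe to call concurrently from the read and write paths: the mutex
 * ensures only one caller performs the setup, and a caller that loses
 * the race sees tegra_apb_dma already set and returns true.
 */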
bool tegra_apb_init(void)
{
	struct tegra_dma_channel *ch;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma)
		goto out;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
		TEGRA_DMA_SHARED);

	if (!ch)
		goto out_fail;

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
		&tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: cannot allocate bounce buffer\n", __func__);
		tegra_dma_free_channel(ch);
		goto out_fail;
	}

	tegra_apb_dma = ch;
out:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

out_fail:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

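/* DMA completion callback: wakes the waiter in tegra_apb_readl/writel */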
static void apb_dma_complete(struct tegra_dma_req *req)
{
	complete(&tegra_apb_wait);
}

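/*
 * Read one 32-bit APB register via DMA.  @offset is the register's bus
 * address; the value is DMAed into the bounce buffer and returned from
 * there.  If the transfer times out (50 ms), the request is dequeued
 * and 0 is returned.
 */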
u32 tegra_apb_readl(unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init())
		return readl(IO_TO_VIRT(offset));

	mutex_lock(&tegra_apb_dma_lock);
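	/* Describe a one-word transfer from the APB register into the bounce buffer */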
	req.complete = apb_dma_complete;
	req.to_memory = 1;
	req.dest_addr = tegra_apb_bb_phys;
	req.dest_bus_width = 32;
	req.dest_wrap = 1;
	req.source_addr = offset;
	req.source_bus_width = 32;
	req.source_wrap = 4;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out")) {
		tegra_dma_dequeue_req(tegra_apb_dma, &req);
		*(u32 *)tegra_apb_bb = 0;
	}

	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

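/*
 * Write one 32-bit APB register via DMA: stage @value in the bounce
 * buffer, then transfer it out to @offset.  A timed-out request is
 * dequeued and the write is dropped beyond the WARN.
 */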
void tegra_apb_writel(u32 value, unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init()) {
		writel(value, IO_TO_VIRT(offset));
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
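	/* Stage the value, then describe a one-word transfer out to the register */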
	*((u32 *)tegra_apb_bb) = value;
	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
		msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}