/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>
#include <mach/iomap.h>

#include "apbio.h"

#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

#if defined(CONFIG_TEGRA_SYSTEM_DMA)
static struct tegra_dma_channel *tegra_apb_dma;

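/*
 * Lazily set up the legacy Tegra DMA path: grab a shared one-shot channel
 * and allocate a single-word coherent bounce buffer.  Returns true when the
 * DMA path is usable, false when callers must fall back to direct MMIO.
 */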
bool tegra_apb_init(void)
{
	struct tegra_dma_channel *ch;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check whether another caller beat us to the setup */
	if (tegra_apb_dma)
		goto out;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
					TEGRA_DMA_SHARED);

	if (!ch)
		goto out_fail;

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: cannot allocate bounce buffer\n", __func__);
		tegra_dma_free_channel(ch);
		goto out_fail;
	}

	tegra_apb_dma = ch;
out:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

out_fail:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(struct tegra_dma_req *req)
{
	complete(&tegra_apb_wait);
}

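/*
 * Read a 32-bit APB register by DMAing it into the bounce buffer and
 * waiting (up to 50 ms) for the transfer to finish.  Falls back to a
 * direct read when the DMA channel is not available.
 */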
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	req.complete = apb_dma_complete;
	req.to_memory = 1;
	req.dest_addr = tegra_apb_bb_phys;
	req.dest_bus_width = 32;
	req.dest_wrap = 1;
	req.source_addr = offset;
	req.source_bus_width = 32;
	req.source_wrap = 4;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out")) {
		tegra_dma_dequeue_req(tegra_apb_dma, &req);
		*(u32 *)tegra_apb_bb = 0;
	}

	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

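/*
 * Write a 32-bit APB register by staging the value in the bounce buffer
 * and DMAing it out to the register.  Falls back to a direct write when
 * the DMA channel is not available.
 */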
static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}

#else
static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;

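/*
 * dmaengine-based counterpart of tegra_apb_init(): request any slave-capable
 * channel, allocate the bounce buffer and prepare a 32-bit, single-burst
 * slave configuration keyed to TEGRA_DMA_REQ_SEL_CNTR.
 */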
bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check whether another caller beat us to the setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is expected until the DMA driver has been probed,
		 * so don't shout about it.
		 */
		pr_debug("%s: cannot allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: cannot allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}

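/*
 * Do one 32-bit transfer between the APB register at @apb_add and the
 * bounce buffer, in the direction given by @dir, waiting up to 50 ms for
 * it to complete.  Returns 0 on success or a negative error code.
 */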
static int do_dma_transfer(unsigned long apb_add,
			   enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *dma_desc;
	int ret;

	if (dir == DMA_DEV_TO_MEM)
		dma_sconfig.src_addr = apb_add;
	else
		dma_sconfig.dst_addr = apb_add;

	ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
	if (ret)
		return ret;

	dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
			tegra_apb_bb_phys, sizeof(u32), dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return -EINVAL;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(tegra_apb_dma_chan);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb dma transfer timed out")) {
		dmaengine_terminate_all(tegra_apb_dma_chan);
		return -EFAULT;
	}
	return 0;
}

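/* Read one APB register through the dmaengine path, with direct-read fallback. */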
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
	if (ret < 0) {
		pr_err("error in reading offset 0x%08lx using dma\n", offset);
		*(u32 *)tegra_apb_bb = 0;
	}
	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

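/* Write one APB register through the dmaengine path, with direct-write fallback. */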
static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
	if (ret < 0)
		pr_err("error in writing offset 0x%08lx using dma\n", offset);
	mutex_unlock(&tegra_apb_dma_lock);
}
#endif
#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif

typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;

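/* Plain MMIO accessors, used when the DMA path is not needed or not available. */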
static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_TO_VIRT(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_TO_VIRT(offset));
}

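/*
 * Pick the accessor implementation at init time: DMA-backed accessors on
 * Tegra20 (and on boots without a populated device tree), direct MMIO
 * everywhere else.
 */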
void tegra_apb_io_init(void)
{
	/* The DMA path is only needed on Tegra20-based platforms */
	if (of_machine_is_compatible("nvidia,tegra20") ||
			!of_have_populated_dt()) {
		apbio_read = tegra_apb_readl_using_dma;
		apbio_write = tegra_apb_writel_using_dma;
	} else {
		apbio_read = tegra_apb_readl_direct;
		apbio_write = tegra_apb_writel_direct;
	}
}

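/* Public APB accessors; these dispatch through the pointers selected above. */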
u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}