blob: 74ac0db5373989ff362932e315f34d64f14b418b [file] [log] [blame]
/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
15
#include <linux/kernel.h>
#include <linux/io.h>
#include <mach/iomap.h>
#include <linux/of.h>

#ifdef CONFIG_TEGRA_SYSTEM_DMA
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>

#include "apbio.h"
31
/* Serializes lazy DMA setup and every bounce-buffer transfer below. */
static DEFINE_MUTEX(tegra_apb_dma_lock);

/* Shared one-shot APB DMA channel; NULL until tegra_apb_init() succeeds. */
static struct tegra_dma_channel *tegra_apb_dma;
/* One-word DMA-coherent bounce buffer and its bus address. */
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
/* Signalled by apb_dma_complete() when a queued transfer finishes. */
static DECLARE_COMPLETION(tegra_apb_wait);

/* Direct (non-DMA) accessors, defined later outside the #ifdef block. */
static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);
41
Olof Johanssone2f91572011-10-12 23:52:29 -070042bool tegra_apb_init(void)
43{
44 struct tegra_dma_channel *ch;
45
46 mutex_lock(&tegra_apb_dma_lock);
47
48 /* Check to see if we raced to setup */
49 if (tegra_apb_dma)
50 goto out;
51
52 ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
53 TEGRA_DMA_SHARED);
54
55 if (!ch)
56 goto out_fail;
57
58 tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
59 &tegra_apb_bb_phys, GFP_KERNEL);
60 if (!tegra_apb_bb) {
61 pr_err("%s: can not allocate bounce buffer\n", __func__);
62 tegra_dma_free_channel(ch);
63 goto out_fail;
64 }
65
66 tegra_apb_dma = ch;
67out:
68 mutex_unlock(&tegra_apb_dma_lock);
69 return true;
70
71out_fail:
72 mutex_unlock(&tegra_apb_dma_lock);
73 return false;
74}
75
/* DMA completion callback: wake the thread waiting in the read/write path. */
static void apb_dma_complete(struct tegra_dma_req *req)
{
	complete(&tegra_apb_wait);
}
80
Laxman Dewanganb861c272012-06-20 18:06:34 +053081static u32 tegra_apb_readl_using_dma(unsigned long offset)
Olof Johanssone2f91572011-10-12 23:52:29 -070082{
83 struct tegra_dma_req req;
84 int ret;
85
86 if (!tegra_apb_dma && !tegra_apb_init())
Laxman Dewanganb861c272012-06-20 18:06:34 +053087 return tegra_apb_readl_direct(offset);
Olof Johanssone2f91572011-10-12 23:52:29 -070088
89 mutex_lock(&tegra_apb_dma_lock);
90 req.complete = apb_dma_complete;
91 req.to_memory = 1;
92 req.dest_addr = tegra_apb_bb_phys;
93 req.dest_bus_width = 32;
94 req.dest_wrap = 1;
95 req.source_addr = offset;
96 req.source_bus_width = 32;
97 req.source_wrap = 4;
98 req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
99 req.size = 4;
100
101 INIT_COMPLETION(tegra_apb_wait);
102
103 tegra_dma_enqueue_req(tegra_apb_dma, &req);
104
105 ret = wait_for_completion_timeout(&tegra_apb_wait,
106 msecs_to_jiffies(50));
107
108 if (WARN(ret == 0, "apb read dma timed out")) {
109 tegra_dma_dequeue_req(tegra_apb_dma, &req);
110 *(u32 *)tegra_apb_bb = 0;
111 }
112
113 mutex_unlock(&tegra_apb_dma_lock);
114 return *((u32 *)tegra_apb_bb);
115}
116
Laxman Dewanganb861c272012-06-20 18:06:34 +0530117static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
Olof Johanssone2f91572011-10-12 23:52:29 -0700118{
119 struct tegra_dma_req req;
120 int ret;
121
122 if (!tegra_apb_dma && !tegra_apb_init()) {
Laxman Dewanganb861c272012-06-20 18:06:34 +0530123 tegra_apb_writel_direct(value, offset);
Olof Johanssone2f91572011-10-12 23:52:29 -0700124 return;
125 }
126
127 mutex_lock(&tegra_apb_dma_lock);
128 *((u32 *)tegra_apb_bb) = value;
129 req.complete = apb_dma_complete;
130 req.to_memory = 0;
131 req.dest_addr = offset;
132 req.dest_wrap = 4;
133 req.dest_bus_width = 32;
134 req.source_addr = tegra_apb_bb_phys;
135 req.source_bus_width = 32;
136 req.source_wrap = 1;
137 req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
138 req.size = 4;
139
140 INIT_COMPLETION(tegra_apb_wait);
141
142 tegra_dma_enqueue_req(tegra_apb_dma, &req);
143
144 ret = wait_for_completion_timeout(&tegra_apb_wait,
145 msecs_to_jiffies(50));
146
147 if (WARN(ret == 0, "apb write dma timed out"))
148 tegra_dma_dequeue_req(tegra_apb_dma, &req);
149
150 mutex_unlock(&tegra_apb_dma_lock);
151}
#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif
156
/* Accessor indirection: one read and one write implementation is
 * selected at boot by tegra_apb_io_init(). */
typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

/* NULL until tegra_apb_io_init() has run. */
static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;
162
/* Plain MMIO read of an APB register via its statically mapped VA. */
static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_TO_VIRT(offset));
}
167
/* Plain MMIO write of an APB register via its statically mapped VA. */
static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_TO_VIRT(offset));
}
172
173void tegra_apb_io_init(void)
174{
175 /* Need to use dma only when it is Tegra20 based platform */
176 if (of_machine_is_compatible("nvidia,tegra20") ||
177 !of_have_populated_dt()) {
178 apbio_read = tegra_apb_readl_using_dma;
179 apbio_write = tegra_apb_writel_using_dma;
180 } else {
181 apbio_read = tegra_apb_readl_direct;
182 apbio_write = tegra_apb_writel_direct;
183 }
184}
185
186u32 tegra_apb_readl(unsigned long offset)
187{
188 return apbio_read(offset);
189}
190
191void tegra_apb_writel(u32 value, unsigned long offset)
192{
193 apbio_write(value, offset);
194}