/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>

#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_REQ_SEL_INVALID		(31<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)

#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)

#define NV_DMA_MAX_TRANSFER_SIZE 0x10000
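
/*
 * Note: the 64 KiB limit follows from the CSR word count field above.
 * CSR_WCOUNT_MASK (0xFFFC) shifted down by CSR_WCOUNT_SHIFT (2) is a
 * 14-bit field holding (words - 1), so one programmed transfer moves at
 * most 0x4000 32-bit words = 0x10000 bytes.
 */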

static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

static const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
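
/*
 * Note (added for clarity): the array index doubles as the value
 * programmed into the APB/AHB_SEQ wrap and bus-width fields.
 * tegra_dma_update_hw() below does the reverse lookup, e.g. an AHB
 * address wrap of 64 words selects index 2 of ahb_addr_wrap_table.
 */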

#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	int			req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32

static DEFINE_MUTEX(tegra_dma_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);

void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);

void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
}

static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}

int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	u32 csr;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}

int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int csr;
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int to_transfer;
	int req_transfer_count;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Change the source selector to invalid to stop the DMA from
	 *    FIFO to memory.
	 *  - Read the status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Finally stop or program the DMA to the next buffer in the
	 *    list.
	 */
	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_REQ_SEL_MASK;
	csr |= CSR_REQ_SEL_INVALID;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	/* Get the transfer count */
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

	req->bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		req->bytes_transferred -= to_transfer;

	/* In continuous transfer mode, the DMA engine only tracks the
	 * count for the current half of the DMA buffer. So, if the DMA
	 * already finished half the buffer, add that half to the
	 * completed count.
	 *
	 *	FIXME: There can be a race here. What if the request to
	 *	dequeue happens at the same time as the DMA just moved to
	 *	the new buffer and software hasn't yet received the
	 *	interrupt?
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			req->bytes_transferred += req_transfer_count;

	req->bytes_transferred *= 4;

	tegra_dma_stop(ch);
	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
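
/*
 * Worked example for the count logic above (illustrative numbers): for
 * a oneshot request programmed with ch->req_transfer_count = 15 (16
 * words, 64 bytes), dequeueing while the channel is still busy with a
 * status count of 5 (6 words pending) yields bytes_transferred =
 * (15 + 1) - (5 + 1) = 10 words, which the final "*= 4" converts to
 * 40 bytes completed.
 */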

bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	is_empty = list_empty(&ch->list);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);

bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);

int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *_req;
	int start_dma = 0;

	if (req->size > NV_DMA_MAX_TRANSFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	list_for_each_entry(_req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return -EEXIST;
		}
	}

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
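
/*
 * Illustrative usage sketch (hypothetical client code; the callback
 * name and field values are examples only). The request must stay
 * valid until its ->complete callback has run:
 *
 *	static void xxx_dma_complete(struct tegra_dma_req *req)
 *	{
 *		pr_debug("dma done, %d bytes\n", req->bytes_transferred);
 *	}
 *
 *	struct tegra_dma_channel *ch;
 *	static struct tegra_dma_req req;
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	if (!ch)
 *		return -EBUSY;
 *	req.complete = xxx_dma_complete;
 *	req.to_memory = 1;
 *	req.size = 64;
 *	(also set source/dest addresses, wrap, bus width and req_sel)
 *	tegra_dma_enqueue_req(ch, &req);
 *	...
 *	tegra_dma_free_channel(ch);
 */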

struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels))
			goto out;
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);

void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);

static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One-shot mode is always single buffered; continuous mode is
	 * always double buffered.
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full.
		 */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;

	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
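
/*
 * Worked example (illustrative numbers): a 256-byte oneshot request
 * gives req_transfer_count = (256 >> 2) - 1 = 63, i.e. 64 words before
 * the EOC interrupt. The same 256 bytes in continuous (double buffered)
 * mode give (256 >> 3) - 1 = 31, so the hardware interrupts after each
 * 128-byte half of the buffer.
 */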

static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		 * another req, in which case the DMA has already started */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete = (readl(ch->addr + APB_DMA_CHAN_STA)
				& STA_PING_PONG) ? true : false;
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					struct tegra_dma_req *next_req;

					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when the callback is called */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}
			/* Load the next request into the hardware, if
			 * available.
			 */
			if (!list_is_last(&req->node, &ch->list)) {
				struct tegra_dma_req *next_req;

				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when the callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt).
			 */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			list_del(&req->node);

			/* DMA lock is NOT held when the callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;

		} else {
			BUG();
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}

static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC) {
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	} else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}
	return IRQ_WAKE_THREAD;
}

static irqreturn_t dma_thread_fn(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}

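/*
 * Note on the IRQ mask write below (added for clarity): it enables
 * interrupts for channels 0..TEGRA_SYSTEM_DMA_CH_MAX only. With
 * TEGRA_SYSTEM_DMA_CH_NR = 16 and TEGRA_SYSTEM_DMA_AVP_CH_NUM = 4,
 * CH_MAX = 11, so 0xFFFFFFFF >> (31 - 11) = 0xFFF sets the low 12 mask
 * bits; the top four channels are left to the AVP.
 */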
int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
		addr + APB_DMA_IRQ_MASK_SET);

	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_threaded_irq(irq, dma_isr, dma_thread_fn, 0,
			dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX + 1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}

#ifdef CONFIG_PM
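/*
 * Note (added for clarity): the suspend context holds 3 global
 * registers (GEN, CNTRL, IRQ_MASK) plus 5 per-channel registers (CSR,
 * AHB_PTR, AHB_SEQ, APB_PTR, APB_SEQ) for each of the 16 channels,
 * hence 5*16 + 3 words in total.
 */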
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];

void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}

void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}

#endif