/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */
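
/*
 * Rough client-side usage sketch (not part of this driver): a peripheral
 * driver on the timberdale FPGA would typically obtain one of these
 * channels and run a slave transfer roughly as follows, using the generic
 * dmaengine API of this era. The filter callback and the sg/nents
 * variables are hypothetical placeholders.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sg, nents, DMA_DEV_TO_MEM,
 *		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done_callback;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */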

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008
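
/*
 * How the interrupt registers are used below: td_irq() reads IPR to see
 * whether any channel has a pending interrupt, writing a channel's bit to
 * ISR acks it (td_tasklet() and __td_dma_done_ack()), and IER is a
 * per-channel-bit interrupt enable mask. The hard irq handler only masks
 * IER and defers all other work to the tasklet.
 */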

/* Channel specific registers */
/* RX instance base addresses are 0x00, 0x40, 0x80 ...
 * TX instance base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* Bytes per row, a video-specific register
 * placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8
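
/*
 * Layout of one 8-byte hardware descriptor, as built by td_fill_desc()
 * and parsed by __td_unmap_desc() below (least significant byte first):
 *
 *	byte 0:    flags; 0x21 = transfer + valid, bit 1 is set on the
 *	           last descriptor of a chain
 *	byte 1:    always written as 0x00
 *	bytes 2-3: transfer length in bytes (at most USHRT_MAX, must be
 *	           a multiple of 4)
 *	bytes 4-7: 32-bit bus address of the data buffer
 */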

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elements per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}
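
/*
 * The channels[] array is allocated directly after struct timb_dma in
 * td_probe(), so the struct timb_dma owning a channel can be recovered by
 * stepping back over the preceding channels and the struct timb_dma
 * header itself; effectively an open-coded container_of() for a flexible
 * array member.
 */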
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
}

static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}
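
/*
 * Worked example (hypothetical values): for a final sg entry with bus
 * address 0x12345678 and length 0x100, td_fill_desc() produces
 *
 *	dma_desc[0..7] = { 0x23, 0x00, 0x00, 0x01, 0x78, 0x56, 0x34, 0x12 }
 *
 * i.e. flags 0x21 | 0x02 (last), the length 0x0100 in bytes 2-3 and the
 * address in bytes 4-7, least significant byte first.
 */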

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {
		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback		callback;
	void				*param;
	struct dma_async_tx_descriptor	*txd;
	struct timb_dma_desc		*td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
				struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}
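
/*
 * Note that tx_submit starts the hardware immediately when the channel is
 * idle; dma_async_issue_pending() is still required by the dmaengine API,
 * and td_issue_pending() below uses it to reap a finished transfer and to
 * kick off anything queued behind it.
 */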
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor list\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
		desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			dev_err(chan2dev(chan),
				"Couldn't allocate any descriptors\n");
			return -ENOMEM;
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage >= td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}

static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		      unsigned long arg)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}
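
/*
 * Interrupt handling is split in two: td_irq() masks all channel
 * interrupts and schedules the tasklet, and td_tasklet() acks the pending
 * bits, completes the finished transfers, starts whatever is queued and
 * finally recomputes and rewrites the IER mask.
 */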
static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
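
/*
 * The probe below expects platform data of the kind declared in
 * <linux/timb_dma.h>. As a rough illustration (hypothetical board code,
 * field values made up), the FPGA MFD code would pass something like:
 *
 *	static const struct timb_dma_platform_data td_pdata = {
 *		.nr_channels = 2,
 *		.channels = {
 *			{
 *				.rx = true,
 *				.descriptors = 2,
 *				.descriptor_elements = 4,
 *				.bytes_per_line = 0,
 *			},
 *			{
 *				.rx = false,
 *				.descriptors = 2,
 *				.descriptor_elements = 4,
 *			},
 *		},
 *	};
 *
 * Even-indexed channels must be RX and odd-indexed channels TX, which is
 * checked in the channel loop below.
 */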
static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources	= td_alloc_chan_resources;
	td->dma.device_free_chan_resources	= td_free_chan_resources;
	td->dma.device_tx_status		= td_tx_status;
	td->dma.device_issue_pending		= td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_control = td_control;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
		.owner  = THIS_MODULE,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:"DRIVER_NAME);