/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two-dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one-dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP which provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */

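/*
 * Client usage sketch (illustrative only, not part of this driver):
 * a consumer requests a channel through the dmaengine API and, for the
 * VDMA case, can tune it via the interface exported in
 * <linux/dma/xilinx_dma.h> before preparing transfers, e.g.:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "vdma0");
 *	struct xilinx_vdma_config cfg = { .frm_cnt_en = 1, .park = 0 };
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 *	// prepare with the dmaengine_prep_* helpers, then dmaengine_submit()
 *	// and dma_async_issue_pending() as with any dmaengine provider
 *
 * The channel name "vdma0" and the chosen config fields are examples only.
 */
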
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @pad2: Reserved @0x0C
 * @pad3: Reserved @0x10
 * @pad4: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 pad2;
	u32 pad3;
	u32 pad4;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @src_addr: Source address @0x08
 * @pad2: Reserved @0x0C
 * @dest_addr: Destination address @0x10
 * @pad3: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 src_addr;
	u32 pad2;
	u32 dest_addr;
	u32 pad3;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
};

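/**
 * struct xilinx_dma_config - DMA IP configuration
 * @dmatype: DMA IP type (AXI VDMA, AXI DMA or AXI CDMA)
 * @clk_init: Callback to initialize the input clocks of the DMA IP
 */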
struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA AXI4-Lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the vdma driver has to write to a register offset that is not a
 * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
 * 32-bit writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
		xilinx_dma_free_tx_segment(chan, chan->seg_v);
	}
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * to meet the Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
				  chan->dev,
				  sizeof(struct xilinx_axidma_tx_segment),
				  __alignof__(struct xilinx_axidma_tx_segment),
				  0);
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				  chan->dev,
				  sizeof(struct xilinx_cdma_tx_segment),
				  __alignof__(struct xilinx_cdma_tx_segment),
				  0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				  chan->dev,
				  sizeof(struct xilinx_vdma_tx_segment),
				  __alignof__(struct xilinx_vdma_tx_segment),
				  0);
	}

	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA case after submitting a pending_list, keep
		 * an extra segment allocated so that the "next descriptor"
		 * pointer on the tail descriptor always points to a
		 * valid descriptor, even when paused after reaching taildesc.
		 * This way, it is possible to issue additional
		 * transfers without halting and restarting the channel.
		 */
		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);

		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain,
		 * so allocate a descriptor segment during channel allocation
		 * just for programming the tail descriptor.
		 */
		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the
		 * other channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   XILINX_DMA_MAX_TRANS_LEN;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}

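/*
 * Note (illustrative, not part of the driver): a dmaengine client reads the
 * residue computed above through the standard helpers, e.g.:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status = dmaengine_tx_status(chan, cookie, &state);
 *	// state.residue holds the bytes still outstanding (AXI DMA only)
 */
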
/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		 XILINX_DMA_DMASR_HALTED) &&
		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
		 XILINX_DMA_DMACR_RUNSTOP);
}

/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		XILINX_DMA_DMASR_IDLE;
}

/**
 * xilinx_dma_halt - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      (val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* Configure the channel to allow the configured number of frame buffers */
	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
		       chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
		else
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be the same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
	}

	if (!chan->has_sg) {
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	} else {
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	}
}

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	old_head = list_first_entry(&head_desc->segments,
				    struct xilinx_axidma_tx_segment, node);
	new_head = chan->seg_v;
	/* Copy Buffer Descriptor fields. */
	new_head->hw = old_head->hw;

	/* Swap and save new reserve */
	list_replace_init(&old_head->node, &new_head->node);
	chan->seg_v = old_head;

	tail_segment->hw.next_desc = chan->seg_v->phys;
	head_desc->async_tx.phys = new_head->phys;

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       head_desc->async_tx.phys);

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		if (chan->cyclic)
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       chan->cyclic_seg_v->phys);
		else
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

1229/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301230 * xilinx_dma_issue_pending - Issue pending transactions
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301231 * @dchan: DMA channel
1232 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301233static void xilinx_dma_issue_pending(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301234{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301235 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301236 unsigned long flags;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301237
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301238 spin_lock_irqsave(&chan->lock, flags);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301239 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301240 spin_unlock_irqrestore(&chan->lock, flags);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301241}
1242
1243/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301244 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301245 * @chan : xilinx DMA channel
1246 *
1247 * CONTEXT: hardirq
1248 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301249static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301250{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301251 struct xilinx_dma_tx_descriptor *desc, *next;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301252
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301253 /* This function was invoked with lock held */
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301254 if (list_empty(&chan->active_list))
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301255 return;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301256
1257 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1258 list_del(&desc->node);
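		/*
		 * For cyclic descriptors the cookie is never completed; the
		 * transfer keeps running and the tasklet is expected to call
		 * the period callback instead.
		 */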
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301259 if (!desc->cyclic)
1260 dma_cookie_complete(&desc->async_tx);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301261 list_add_tail(&desc->node, &chan->done_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301262 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301263}
1264
1265/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301266 * xilinx_dma_reset - Reset DMA channel
1267 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301268 *
1269 * Return: '0' on success and failure value on error
1270 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301271static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301272{
Kedareswara rao Appana69490632016-03-03 23:02:42 +05301273 int err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301274 u32 tmp;
1275
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301276 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301277
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301278 /* Wait for the hardware to finish reset */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301279 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1280 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1281 XILINX_DMA_LOOP_COUNT);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301282
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301283 if (err) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301284 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301285 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1286 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301287 return -ETIMEDOUT;
1288 }
1289
1290 chan->err = false;
1291
Kedareswara rao Appana9495f262016-02-26 19:33:54 +05301292 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301293}
1294
1295/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301296 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1297 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301298 *
1299 * Return: '0' on success and failure value on error
1300 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301301static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301302{
1303 int err;
1304
	/* Reset DMA channel */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301306 err = xilinx_dma_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301307 if (err)
1308 return err;
1309
1310 /* Enable interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301311 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1312 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301313
1314 return 0;
1315}
1316
1317/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301318 * xilinx_dma_irq_handler - DMA Interrupt handler
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301319 * @irq: IRQ number
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301320 * @data: Pointer to the Xilinx DMA channel structure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301321 *
1322 * Return: IRQ_HANDLED/IRQ_NONE
1323 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301324static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301325{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301326 struct xilinx_dma_chan *chan = data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301327 u32 status;
1328
1329 /* Read the status and ack the interrupts. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301330 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1331 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301332 return IRQ_NONE;
1333
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301334 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1335 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301336
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301337 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301338 /*
1339 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1340 * error is recoverable, ignore it. Otherwise flag the error.
1341 *
1342 * Only recoverable errors can be cleared in the DMASR register,
		 * so take care not to set any other error bits to 1.
1344 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301345 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
Kedareswara rao Appana48a59ed2016-04-06 10:44:55 +05301346
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301347 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1348 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301349
1350 if (!chan->flush_on_fsync ||
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301351 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301352 dev_err(chan->dev,
1353 "Channel %p has errors %x, cdr %x tdr %x\n",
1354 chan, errors,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301355 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1356 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301357 chan->err = true;
1358 }
1359 }
1360
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301361 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301362 /*
		 * The device took longer between packets than the delay
		 * threshold configured for user responsiveness.
1365 */
1366 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1367 }
1368
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301369 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301370 spin_lock(&chan->lock);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301371 xilinx_dma_complete_descriptor(chan);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301372 chan->start_transfer(chan);
Kedareswara rao Appana26c5e362016-02-26 19:33:52 +05301373 spin_unlock(&chan->lock);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301374 }
1375
1376 tasklet_schedule(&chan->tasklet);
1377 return IRQ_HANDLED;
1378}
1379
1380/**
 * append_desc_queue - Queue a descriptor onto the channel's pending list
1382 * @chan: Driver specific dma channel
1383 * @desc: dma transaction descriptor
1384 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301385static void append_desc_queue(struct xilinx_dma_chan *chan,
1386 struct xilinx_dma_tx_descriptor *desc)
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301387{
1388 struct xilinx_vdma_tx_segment *tail_segment;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301389 struct xilinx_dma_tx_descriptor *tail_desc;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301390 struct xilinx_axidma_tx_segment *axidma_tail_segment;
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301391 struct xilinx_cdma_tx_segment *cdma_tail_segment;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301392
1393 if (list_empty(&chan->pending_list))
1394 goto append;
1395
1396 /*
1397 * Add the hardware descriptor to the chain of hardware descriptors
1398 * that already exists in memory.
1399 */
1400 tail_desc = list_last_entry(&chan->pending_list,
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301401 struct xilinx_dma_tx_descriptor, node);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301402 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301403 tail_segment = list_last_entry(&tail_desc->segments,
1404 struct xilinx_vdma_tx_segment,
1405 node);
1406 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301407 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301408 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1409 struct xilinx_cdma_tx_segment,
1410 node);
1411 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301412 } else {
1413 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1414 struct xilinx_axidma_tx_segment,
1415 node);
1416 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1417 }
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301418
1419 /*
1420 * Add the software descriptor and all children to the list
1421 * of pending transactions
1422 */
1423append:
1424 list_add_tail(&desc->node, &chan->pending_list);
1425 chan->desc_pendingcount++;
1426
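	/*
	 * VDMA cycles through a fixed set of frame buffers, so never let the
	 * pending count exceed the number of frame stores.
	 */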
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05301427 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1428 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301429 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1430 chan->desc_pendingcount = chan->num_frms;
1431 }
1432}
1433
1434/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301435 * xilinx_dma_tx_submit - Submit DMA transaction
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301436 * @tx: Async transaction descriptor
1437 *
1438 * Return: cookie value on success and failure value on error
1439 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301440static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301441{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301442 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1443 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301444 dma_cookie_t cookie;
1445 unsigned long flags;
1446 int err;
1447
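	/*
	 * Once a cyclic transfer has been set up it owns the channel, so
	 * reject any further submissions until it is terminated.
	 */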
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301448 if (chan->cyclic) {
1449 xilinx_dma_free_tx_descriptor(chan, desc);
1450 return -EBUSY;
1451 }
1452
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301453 if (chan->err) {
1454 /*
1455 * If reset fails, need to hard reset the system.
1456 * Channel is no longer functional
1457 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301458 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301459 if (err < 0)
1460 return err;
1461 }
1462
1463 spin_lock_irqsave(&chan->lock, flags);
1464
1465 cookie = dma_cookie_assign(tx);
1466
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301467 /* Put this transaction onto the tail of the pending queue */
1468 append_desc_queue(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301469
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301470 if (desc->cyclic)
1471 chan->cyclic = true;
1472
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301473 spin_unlock_irqrestore(&chan->lock, flags);
1474
1475 return cookie;
1476}
1477
1478/**
1479 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1480 * DMA_SLAVE transaction
1481 * @dchan: DMA channel
1482 * @xt: Interleaved template pointer
1483 * @flags: transfer ack flags
1484 *
1485 * Return: Async transaction descriptor on success and NULL on failure
1486 */
1487static struct dma_async_tx_descriptor *
1488xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1489 struct dma_interleaved_template *xt,
1490 unsigned long flags)
1491{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301492 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1493 struct xilinx_dma_tx_descriptor *desc;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301494 struct xilinx_vdma_tx_segment *segment, *prev = NULL;
1495 struct xilinx_vdma_desc_hw *hw;
1496
1497 if (!is_slave_direction(xt->dir))
1498 return NULL;
1499
1500 if (!xt->numf || !xt->sgl[0].size)
1501 return NULL;
1502
Srikanth Thokalaa5e48e22014-11-05 20:37:01 +02001503 if (xt->frame_size != 1)
1504 return NULL;
1505
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301506 /* Allocate a transaction descriptor. */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301507 desc = xilinx_dma_alloc_tx_descriptor(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301508 if (!desc)
1509 return NULL;
1510
1511 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301512 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301513 async_tx_ack(&desc->async_tx);
1514
1515 /* Allocate the link descriptor from DMA pool */
1516 segment = xilinx_vdma_alloc_tx_segment(chan);
1517 if (!segment)
1518 goto error;
1519
1520 /* Fill in the hardware descriptor */
1521 hw = &segment->hw;
1522 hw->vsize = xt->numf;
1523 hw->hsize = xt->sgl[0].size;
Srikanth Thokala6d80f452014-11-05 20:37:02 +02001524 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301525 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301526 hw->stride |= chan->config.frm_dly <<
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301527 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301528
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05301529 if (xt->dir != DMA_MEM_TO_DEV) {
1530 if (chan->ext_addr) {
1531 hw->buf_addr = lower_32_bits(xt->dst_start);
1532 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1533 } else {
1534 hw->buf_addr = xt->dst_start;
1535 }
1536 } else {
1537 if (chan->ext_addr) {
1538 hw->buf_addr = lower_32_bits(xt->src_start);
1539 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1540 } else {
1541 hw->buf_addr = xt->src_start;
1542 }
1543 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301544
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301545 /* Insert the segment into the descriptor segments list. */
1546 list_add_tail(&segment->node, &desc->segments);
1547
1548 prev = segment;
1549
1550 /* Link the last hardware descriptor with the first. */
1551 segment = list_first_entry(&desc->segments,
1552 struct xilinx_vdma_tx_segment, node);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05301553 desc->async_tx.phys = segment->phys;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301554
1555 return &desc->async_tx;
1556
1557error:
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301558 xilinx_dma_free_tx_descriptor(chan, desc);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301559 return NULL;
1560}
1561
1562/**
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301563 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1564 * @dchan: DMA channel
1565 * @dma_dst: destination address
1566 * @dma_src: source address
1567 * @len: transfer length
1568 * @flags: transfer ack flags
1569 *
1570 * Return: Async transaction descriptor on success and NULL on failure
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301571 */
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301572static struct dma_async_tx_descriptor *
1573xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1574 dma_addr_t dma_src, size_t len, unsigned long flags)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301575{
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05301576 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1577 struct xilinx_dma_tx_descriptor *desc;
1578 struct xilinx_cdma_tx_segment *segment, *prev;
1579 struct xilinx_cdma_desc_hw *hw;
1580
1581 if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1582 return NULL;
1583
1584 desc = xilinx_dma_alloc_tx_descriptor(chan);
1585 if (!desc)
1586 return NULL;
1587
1588 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1589 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1590
1591 /* Allocate the link descriptor from DMA pool */
1592 segment = xilinx_cdma_alloc_tx_segment(chan);
1593 if (!segment)
1594 goto error;
1595
1596 hw = &segment->hw;
1597 hw->control = len;
1598 hw->src_addr = dma_src;
1599 hw->dest_addr = dma_dst;
1600
1601 /* Fill the previous next descriptor with current */
1602 prev = list_last_entry(&desc->segments,
1603 struct xilinx_cdma_tx_segment, node);
1604 prev->hw.next_desc = segment->phys;
1605
1606 /* Insert the segment into the descriptor segments list. */
1607 list_add_tail(&segment->node, &desc->segments);
1608
1609 prev = segment;
1610
1611 /* Link the last hardware descriptor with the first. */
1612 segment = list_first_entry(&desc->segments,
1613 struct xilinx_cdma_tx_segment, node);
1614 desc->async_tx.phys = segment->phys;
1615 prev->hw.next_desc = segment->phys;
1616
1617 return &desc->async_tx;
1618
1619error:
1620 xilinx_dma_free_tx_descriptor(chan, desc);
1621 return NULL;
1622}
1623
1624/**
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05301625 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1626 * @dchan: DMA channel
1627 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
1629 * @direction: DMA direction
1630 * @flags: transfer ack flags
1631 * @context: APP words of the descriptor
1632 *
1633 * Return: Async transaction descriptor on success and NULL on failure
1634 */
1635static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1636 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1637 enum dma_transfer_direction direction, unsigned long flags,
1638 void *context)
1639{
1640 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1641 struct xilinx_dma_tx_descriptor *desc;
1642 struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
1643 u32 *app_w = (u32 *)context;
1644 struct scatterlist *sg;
1645 size_t copy;
1646 size_t sg_used;
1647 unsigned int i;
1648
1649 if (!is_slave_direction(direction))
1650 return NULL;
1651
1652 /* Allocate a transaction descriptor. */
1653 desc = xilinx_dma_alloc_tx_descriptor(chan);
1654 if (!desc)
1655 return NULL;
1656
1657 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1658 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1659
1660 /* Build transactions using information in the scatter gather list */
1661 for_each_sg(sgl, sg, sg_len, i) {
1662 sg_used = 0;
1663
1664 /* Loop until the entire scatterlist entry is used */
1665 while (sg_used < sg_dma_len(sg)) {
1666 struct xilinx_axidma_desc_hw *hw;
1667
1668 /* Get a free segment */
1669 segment = xilinx_axidma_alloc_tx_segment(chan);
1670 if (!segment)
1671 goto error;
1672
1673 /*
1674 * Calculate the maximum number of bytes to transfer,
1675 * making sure it is less than the hw limit
1676 */
1677 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1678 XILINX_DMA_MAX_TRANS_LEN);
1679 hw = &segment->hw;
1680
1681 /* Fill in the descriptor */
1682 hw->buf_addr = sg_dma_address(sg) + sg_used;
1683
1684 hw->control = copy;
1685
1686 if (chan->direction == DMA_MEM_TO_DEV) {
1687 if (app_w)
1688 memcpy(hw->app, app_w, sizeof(u32) *
1689 XILINX_DMA_NUM_APP_WORDS);
1690 }
1691
1692 if (prev)
1693 prev->hw.next_desc = segment->phys;
1694
1695 prev = segment;
1696 sg_used += copy;
1697
1698 /*
1699 * Insert the segment into the descriptor segments
1700 * list.
1701 */
1702 list_add_tail(&segment->node, &desc->segments);
1703 }
1704 }
1705
1706 segment = list_first_entry(&desc->segments,
1707 struct xilinx_axidma_tx_segment, node);
1708 desc->async_tx.phys = segment->phys;
1709 prev->hw.next_desc = segment->phys;
1710
	/* For MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
1712 if (chan->direction == DMA_MEM_TO_DEV) {
1713 segment->hw.control |= XILINX_DMA_BD_SOP;
1714 segment = list_last_entry(&desc->segments,
1715 struct xilinx_axidma_tx_segment,
1716 node);
1717 segment->hw.control |= XILINX_DMA_BD_EOP;
1718 }
1719
1720 return &desc->async_tx;
1721
1722error:
1723 xilinx_dma_free_tx_descriptor(chan, desc);
1724 return NULL;
1725}
1726
1727/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
1734 */
1735static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1736 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1737 size_t period_len, enum dma_transfer_direction direction,
1738 unsigned long flags)
1739{
1740 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1741 struct xilinx_dma_tx_descriptor *desc;
1742 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1743 size_t copy, sg_used;
1744 unsigned int num_periods;
1745 int i;
1746 u32 reg;
1747
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02001748 if (!period_len)
1749 return NULL;
1750
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301751 num_periods = buf_len / period_len;
1752
Arnd Bergmannf67c3bd2016-06-13 17:07:33 +02001753 if (!num_periods)
1754 return NULL;
1755
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301756 if (!is_slave_direction(direction))
1757 return NULL;
1758
1759 /* Allocate a transaction descriptor. */
1760 desc = xilinx_dma_alloc_tx_descriptor(chan);
1761 if (!desc)
1762 return NULL;
1763
1764 chan->direction = direction;
1765 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1766 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1767
1768 for (i = 0; i < num_periods; ++i) {
1769 sg_used = 0;
1770
1771 while (sg_used < period_len) {
1772 struct xilinx_axidma_desc_hw *hw;
1773
1774 /* Get a free segment */
1775 segment = xilinx_axidma_alloc_tx_segment(chan);
1776 if (!segment)
1777 goto error;
1778
1779 /*
1780 * Calculate the maximum number of bytes to transfer,
1781 * making sure it is less than the hw limit
1782 */
1783 copy = min_t(size_t, period_len - sg_used,
1784 XILINX_DMA_MAX_TRANS_LEN);
1785 hw = &segment->hw;
1786 hw->buf_addr = buf_addr + sg_used + (period_len * i);
1787 hw->control = copy;
1788
1789 if (prev)
1790 prev->hw.next_desc = segment->phys;
1791
1792 prev = segment;
1793 sg_used += copy;
1794
1795 /*
1796 * Insert the segment into the descriptor segments
1797 * list.
1798 */
1799 list_add_tail(&segment->node, &desc->segments);
1800 }
1801 }
1802
1803 head_segment = list_first_entry(&desc->segments,
1804 struct xilinx_axidma_tx_segment, node);
1805 desc->async_tx.phys = head_segment->phys;
1806
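	/*
	 * Mark the descriptor cyclic and enable cyclic BD handling in the
	 * control register so the engine can keep fetching BDs around the
	 * ring.
	 */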
1807 desc->cyclic = true;
1808 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1809 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1810 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1811
	/* For MEM_TO_DEV, set SOP/EOP and loop the last BD back to the head */
1813 if (direction == DMA_MEM_TO_DEV) {
Kedareswara rao Appanae167a0b2016-06-09 11:32:12 +05301814 head_segment->hw.control |= XILINX_DMA_BD_SOP;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301815 segment = list_last_entry(&desc->segments,
1816 struct xilinx_axidma_tx_segment,
1817 node);
1818 segment->hw.control |= XILINX_DMA_BD_EOP;
1819 segment->hw.next_desc = (u32) head_segment->phys;
1820 }
1821
1822 return &desc->async_tx;
1823
1824error:
1825 xilinx_dma_free_tx_descriptor(chan, desc);
1826 return NULL;
1827}
1828
1829/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301830 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: Always '0'
 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301833static int xilinx_dma_terminate_all(struct dma_chan *dchan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301834{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301835 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301836 u32 reg;
1837
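	/*
	 * A cyclic transfer never completes on its own, so reset the channel
	 * first to get the hardware off the BD ring.
	 */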
1838 if (chan->cyclic)
1839 xilinx_dma_chan_reset(chan);
Maxime Ripardba714042014-11-17 14:42:38 +01001840
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301841 /* Halt the DMA engine */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301842 xilinx_dma_halt(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301843
1844 /* Remove and free all of the descriptors in the lists */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301845 xilinx_dma_free_descriptors(chan);
Maxime Ripardba714042014-11-17 14:42:38 +01001846
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05301847 if (chan->cyclic) {
1848 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1849 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1850 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1851 chan->cyclic = false;
1852 }
1853
Maxime Ripardba714042014-11-17 14:42:38 +01001854 return 0;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301855}
1856
1857/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
1860 * . halt the channel
1861 * . configure interrupt coalescing and inter-packet delay threshold
1862 * . start/stop parking
1863 * . enable genlock
1864 *
1865 * @dchan: DMA channel
1866 * @cfg: VDMA device configuration pointer
1867 *
1868 * Return: '0' on success and failure value on error
1869 */
1870int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
1871 struct xilinx_vdma_config *cfg)
1872{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301873 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301874 u32 dmacr;
1875
1876 if (cfg->reset)
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301877 return xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301878
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301879 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301880
1881 chan->config.frm_dly = cfg->frm_dly;
1882 chan->config.park = cfg->park;
1883
1884 /* genlock settings */
1885 chan->config.gen_lock = cfg->gen_lock;
1886 chan->config.master = cfg->master;
1887
1888 if (cfg->gen_lock && chan->genlock) {
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301889 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
1890 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301891 }
1892
1893 chan->config.frm_cnt_en = cfg->frm_cnt_en;
1894 if (cfg->park)
1895 chan->config.park_frm = cfg->park_frm;
1896 else
1897 chan->config.park_frm = -1;
1898
1899 chan->config.coalesc = cfg->coalesc;
1900 chan->config.delay = cfg->delay;
1901
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301902 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
1903 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301904 chan->config.coalesc = cfg->coalesc;
1905 }
1906
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301907 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
1908 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301909 chan->config.delay = cfg->delay;
1910 }
1911
1912 /* FSync Source selection */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301913 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
1914 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301915
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301916 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301917
1918 return 0;
1919}
1920EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
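/*
 * Example use from a video client driver (a minimal sketch: it assumes the
 * client has already obtained a VDMA channel from the dmaengine core, e.g.
 * via dma_request_chan(), and the field values shown are illustrative only):
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_dly = 0,
 *		.gen_lock = 1,
 *		.master = 0,
 *		.coalesc = 1,
 *		.delay = 0,
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */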
1921
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301922/* -----------------------------------------------------------------------------
1923 * Probe and remove
1924 */
1925
1926/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301927 * xilinx_dma_chan_remove - Per Channel remove function
1928 * @chan: Driver specific DMA channel
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301929 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301930static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301931{
1932 /* Disable all interrupts */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05301933 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1934 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05301935
1936 if (chan->irq > 0)
1937 free_irq(chan->irq, chan);
1938
1939 tasklet_kill(&chan->tasklet);
1940
1941 list_del(&chan->common.device_node);
1942}
1943
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05301944static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
1945 struct clk **tx_clk, struct clk **rx_clk,
1946 struct clk **sg_clk, struct clk **tmp_clk)
1947{
1948 int err;
1949
1950 *tmp_clk = NULL;
1951
1952 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
1953 if (IS_ERR(*axi_clk)) {
1954 err = PTR_ERR(*axi_clk);
1955 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
1956 return err;
1957 }
1958
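	/*
	 * The MM2S, S2MM and SG clocks are optional at synthesis time, so a
	 * missing clock is treated as absent rather than as a probe failure;
	 * clk_prepare_enable() accepts the resulting NULL clock.
	 */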
1959 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
1960 if (IS_ERR(*tx_clk))
1961 *tx_clk = NULL;
1962
1963 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
1964 if (IS_ERR(*rx_clk))
1965 *rx_clk = NULL;
1966
1967 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
1968 if (IS_ERR(*sg_clk))
1969 *sg_clk = NULL;
1970
1971 err = clk_prepare_enable(*axi_clk);
1972 if (err) {
1973 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
1974 return err;
1975 }
1976
1977 err = clk_prepare_enable(*tx_clk);
1978 if (err) {
1979 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
1980 goto err_disable_axiclk;
1981 }
1982
1983 err = clk_prepare_enable(*rx_clk);
1984 if (err) {
1985 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
1986 goto err_disable_txclk;
1987 }
1988
1989 err = clk_prepare_enable(*sg_clk);
1990 if (err) {
1991 dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
1992 goto err_disable_rxclk;
1993 }
1994
1995 return 0;
1996
1997err_disable_rxclk:
1998 clk_disable_unprepare(*rx_clk);
1999err_disable_txclk:
2000 clk_disable_unprepare(*tx_clk);
2001err_disable_axiclk:
2002 clk_disable_unprepare(*axi_clk);
2003
2004 return err;
2005}
2006
2007static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2008 struct clk **dev_clk, struct clk **tmp_clk,
2009 struct clk **tmp1_clk, struct clk **tmp2_clk)
2010{
2011 int err;
2012
2013 *tmp_clk = NULL;
2014 *tmp1_clk = NULL;
2015 *tmp2_clk = NULL;
2016
2017 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2018 if (IS_ERR(*axi_clk)) {
2019 err = PTR_ERR(*axi_clk);
2020 dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
2021 return err;
2022 }
2023
2024 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2025 if (IS_ERR(*dev_clk)) {
2026 err = PTR_ERR(*dev_clk);
2027 dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
2028 return err;
2029 }
2030
2031 err = clk_prepare_enable(*axi_clk);
2032 if (err) {
2033 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
2034 return err;
2035 }
2036
2037 err = clk_prepare_enable(*dev_clk);
2038 if (err) {
2039 dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
2040 goto err_disable_axiclk;
2041 }
2042
2043 return 0;
2044
2045err_disable_axiclk:
2046 clk_disable_unprepare(*axi_clk);
2047
2048 return err;
2049}
2050
2051static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2052 struct clk **tx_clk, struct clk **txs_clk,
2053 struct clk **rx_clk, struct clk **rxs_clk)
2054{
2055 int err;
2056
2057 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2058 if (IS_ERR(*axi_clk)) {
2059 err = PTR_ERR(*axi_clk);
2060 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
2061 return err;
2062 }
2063
2064 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2065 if (IS_ERR(*tx_clk))
2066 *tx_clk = NULL;
2067
2068 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2069 if (IS_ERR(*txs_clk))
2070 *txs_clk = NULL;
2071
2072 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2073 if (IS_ERR(*rx_clk))
2074 *rx_clk = NULL;
2075
2076 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2077 if (IS_ERR(*rxs_clk))
2078 *rxs_clk = NULL;
2079
2080 err = clk_prepare_enable(*axi_clk);
2081 if (err) {
2082 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
2083 return err;
2084 }
2085
2086 err = clk_prepare_enable(*tx_clk);
2087 if (err) {
2088 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
2089 goto err_disable_axiclk;
2090 }
2091
2092 err = clk_prepare_enable(*txs_clk);
2093 if (err) {
2094 dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
2095 goto err_disable_txclk;
2096 }
2097
2098 err = clk_prepare_enable(*rx_clk);
2099 if (err) {
2100 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
2101 goto err_disable_txsclk;
2102 }
2103
2104 err = clk_prepare_enable(*rxs_clk);
2105 if (err) {
2106 dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
2107 goto err_disable_rxclk;
2108 }
2109
2110 return 0;
2111
2112err_disable_rxclk:
2113 clk_disable_unprepare(*rx_clk);
2114err_disable_txsclk:
2115 clk_disable_unprepare(*txs_clk);
2116err_disable_txclk:
2117 clk_disable_unprepare(*tx_clk);
2118err_disable_axiclk:
2119 clk_disable_unprepare(*axi_clk);
2120
2121 return err;
2122}
2123
2124static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2125{
2126 clk_disable_unprepare(xdev->rxs_clk);
2127 clk_disable_unprepare(xdev->rx_clk);
2128 clk_disable_unprepare(xdev->txs_clk);
2129 clk_disable_unprepare(xdev->tx_clk);
2130 clk_disable_unprepare(xdev->axi_clk);
2131}
2132
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302133/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302134 * xilinx_dma_chan_probe - Per Channel Probing
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302135 * It get channel features from the device tree entry and
2136 * initialize special channel handling routines
2137 *
2138 * @xdev: Driver specific device structure
2139 * @node: Device node
2140 *
2141 * Return: '0' on success and failure value on error
2142 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302143static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302144 struct device_node *node)
2145{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302146 struct xilinx_dma_chan *chan;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302147 bool has_dre = false;
2148 u32 value, width;
2149 int err;
2150
2151 /* Allocate and initialize the channel structure */
2152 chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2153 if (!chan)
2154 return -ENOMEM;
2155
2156 chan->dev = xdev->dev;
2157 chan->xdev = xdev;
2158 chan->has_sg = xdev->has_sg;
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302159 chan->desc_pendingcount = 0x0;
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302160 chan->ext_addr = xdev->ext_addr;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302161
2162 spin_lock_init(&chan->lock);
2163 INIT_LIST_HEAD(&chan->pending_list);
2164 INIT_LIST_HEAD(&chan->done_list);
Kedareswara rao Appana7096f362016-02-26 19:33:51 +05302165 INIT_LIST_HEAD(&chan->active_list);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302166
2167 /* Retrieve the channel properties from the device tree */
2168 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2169
2170 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2171
2172 err = of_property_read_u32(node, "xlnx,datawidth", &value);
2173 if (err) {
2174 dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2175 return err;
2176 }
2177 width = value >> 3; /* Convert bits to bytes */
2178
2179 /* If data width is greater than 8 bytes, DRE is not in hw */
2180 if (width > 8)
2181 has_dre = false;
2182
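	/*
	 * Without a Data Realignment Engine in the hardware, buffers must be
	 * aligned to the stream data width, so advertise that alignment to
	 * the dmaengine core.
	 */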
2183 if (!has_dre)
2184 xdev->common.copy_align = fls(width - 1);
2185
2186 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
2187 chan->direction = DMA_MEM_TO_DEV;
2188 chan->id = 0;
2189
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302190 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302191 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302192 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302193
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302194 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2195 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2196 chan->flush_on_fsync = true;
2197 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302198 } else if (of_device_is_compatible(node,
2199 "xlnx,axi-vdma-s2mm-channel")) {
2200 chan->direction = DMA_DEV_TO_MEM;
2201 chan->id = 1;
2202
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302203 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302204 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302205 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302206
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302207 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2208 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2209 chan->flush_on_fsync = true;
2210 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302211 } else {
2212 dev_err(xdev->dev, "Invalid channel compatible node\n");
2213 return -EINVAL;
2214 }
2215
2216 /* Request the interrupt */
2217 chan->irq = irq_of_parse_and_map(node, 0);
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302218 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
2219 "xilinx-dma-controller", chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302220 if (err) {
2221 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2222 return err;
2223 }
2224
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302225 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302226 chan->start_transfer = xilinx_dma_start_transfer;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302227 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302228 chan->start_transfer = xilinx_cdma_start_transfer;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302229 else
2230 chan->start_transfer = xilinx_vdma_start_transfer;
2231
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302232 /* Initialize the tasklet */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302233 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302234 (unsigned long)chan);
2235
2236 /*
2237 * Initialize the DMA channel and add it to the DMA engine channels
2238 * list.
2239 */
2240 chan->common.device = &xdev->common;
2241
2242 list_add_tail(&chan->common.device_node, &xdev->common.channels);
2243 xdev->chan[chan->id] = chan;
2244
2245 /* Reset the channel */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302246 err = xilinx_dma_chan_reset(chan);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302247 if (err < 0) {
2248 dev_err(xdev->dev, "Reset channel failed\n");
2249 return err;
2250 }
2251
2252 return 0;
2253}
2254
2255/**
2256 * of_dma_xilinx_xlate - Translation function
2257 * @dma_spec: Pointer to DMA specifier as found in the device tree
2258 * @ofdma: Pointer to DMA controller data
2259 *
2260 * Return: DMA channel pointer on success and NULL on error
2261 */
2262static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2263 struct of_dma *ofdma)
2264{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302265 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302266 int chan_id = dma_spec->args[0];
2267
Linus Torvaldsa0d3c7c2016-05-19 11:47:18 -07002268 if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302269 return NULL;
2270
2271 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2272}
2273
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302274static const struct xilinx_dma_config axidma_config = {
2275 .dmatype = XDMA_TYPE_AXIDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302276 .clk_init = axidma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302277};
2278
2279static const struct xilinx_dma_config axicdma_config = {
2280 .dmatype = XDMA_TYPE_CDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302281 .clk_init = axicdma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302282};
2283
2284static const struct xilinx_dma_config axivdma_config = {
2285 .dmatype = XDMA_TYPE_VDMA,
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302286 .clk_init = axivdma_clk_init,
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302287};
2288
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302289static const struct of_device_id xilinx_dma_of_ids[] = {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302290 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2291 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2292 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302293 {}
2294};
2295MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2296
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302297/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302298 * xilinx_dma_probe - Driver probe function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302299 * @pdev: Pointer to the platform_device structure
2300 *
2301 * Return: '0' on success and failure value on error
2302 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302303static int xilinx_dma_probe(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302304{
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302305 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2306 struct clk **, struct clk **, struct clk **)
2307 = axivdma_clk_init;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302308 struct device_node *node = pdev->dev.of_node;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302309 struct xilinx_dma_device *xdev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302310 struct device_node *child, *np = pdev->dev.of_node;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302311 struct resource *io;
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302312 u32 num_frames, addr_width;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302313 int i, err;
2314
2315 /* Allocate and initialize the DMA engine structure */
2316 xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2317 if (!xdev)
2318 return -ENOMEM;
2319
2320 xdev->dev = &pdev->dev;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302321 if (np) {
2322 const struct of_device_id *match;
2323
2324 match = of_match_node(xilinx_dma_of_ids, np);
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302325 if (match && match->data) {
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302326 xdev->dma_config = match->data;
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302327 clk_init = xdev->dma_config->clk_init;
2328 }
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302329 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302330
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302331 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2332 &xdev->rx_clk, &xdev->rxs_clk);
2333 if (err)
2334 return err;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302335
2336 /* Request and map I/O memory */
2337 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2338 xdev->regs = devm_ioremap_resource(&pdev->dev, io);
2339 if (IS_ERR(xdev->regs))
2340 return PTR_ERR(xdev->regs);
2341
2342 /* Retrieve the DMA engine properties from the device tree */
2343 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2344
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302345 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302346 err = of_property_read_u32(node, "xlnx,num-fstores",
2347 &num_frames);
2348 if (err < 0) {
2349 dev_err(xdev->dev,
2350 "missing xlnx,num-fstores property\n");
2351 return err;
2352 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302353
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302354 err = of_property_read_u32(node, "xlnx,flush-fsync",
2355 &xdev->flush_on_fsync);
2356 if (err < 0)
2357 dev_warn(xdev->dev,
2358 "missing xlnx,flush-fsync property\n");
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302359 }
2360
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302361 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302362 if (err < 0)
Kedareswara rao Appanab72db402016-04-06 10:38:08 +05302363 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2364
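	/*
	 * An address width above 32 bits means the core was built with the
	 * extended (MSB) address registers, so descriptors carry 64-bit
	 * buffer addresses.
	 */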
2365 if (addr_width > 32)
2366 xdev->ext_addr = true;
2367 else
2368 xdev->ext_addr = false;
2369
2370 /* Set the dma mask bits */
2371 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302372
2373 /* Initialize the DMA engine */
2374 xdev->common.dev = &pdev->dev;
2375
2376 INIT_LIST_HEAD(&xdev->common.channels);
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302377 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302378 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2379 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2380 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302381
2382 xdev->common.device_alloc_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302383 xilinx_dma_alloc_chan_resources;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302384 xdev->common.device_free_chan_resources =
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302385 xilinx_dma_free_chan_resources;
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302386 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2387 xdev->common.device_tx_status = xilinx_dma_tx_status;
2388 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302389 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302390 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302391 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
Kedareswara rao Appana92d794d2016-05-18 13:17:30 +05302392 xdev->common.device_prep_dma_cyclic =
2393 xilinx_dma_prep_dma_cyclic;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302394 /* Residue calculation is supported by only AXI DMA */
2395 xdev->common.residue_granularity =
2396 DMA_RESIDUE_GRANULARITY_SEGMENT;
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302397 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
Kedareswara rao Appana07b0e7d2016-04-07 10:59:45 +05302398 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2399 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302400 } else {
2401 xdev->common.device_prep_interleaved_dma =
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302402 xilinx_vdma_dma_prep_interleaved;
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302403 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302404
2405 platform_set_drvdata(pdev, xdev);
2406
2407 /* Initialize the channels */
2408 for_each_child_of_node(node, child) {
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302409 err = xilinx_dma_chan_probe(xdev, child);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302410 if (err < 0)
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302411 goto disable_clks;
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302412 }
2413
Kedareswara rao Appanafb236662016-05-13 12:33:29 +05302414 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
Kedareswara rao Appanac0bba3a2016-04-07 10:59:43 +05302415 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
2416 if (xdev->chan[i])
2417 xdev->chan[i]->num_frms = num_frames;
2418 }
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302419
2420 /* Register the DMA engine with the core */
2421 dma_async_device_register(&xdev->common);
2422
2423 err = of_dma_controller_register(node, of_dma_xilinx_xlate,
2424 xdev);
2425 if (err < 0) {
2426 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
2427 dma_async_device_unregister(&xdev->common);
2428 goto error;
2429 }
2430
2431 dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
2432
2433 return 0;
2434
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302435disable_clks:
2436 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302437error:
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302438 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302439 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302440 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302441
2442 return err;
2443}
2444
2445/**
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302446 * xilinx_dma_remove - Driver remove function
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302447 * @pdev: Pointer to the platform_device structure
2448 *
2449 * Return: Always '0'
2450 */
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302451static int xilinx_dma_remove(struct platform_device *pdev)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302452{
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302453 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302454 int i;
2455
2456 of_dma_controller_free(pdev->dev.of_node);
2457
2458 dma_async_device_unregister(&xdev->common);
2459
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302460 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302461 if (xdev->chan[i])
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302462 xilinx_dma_chan_remove(xdev->chan[i]);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302463
Kedareswara rao Appanaba16db32016-05-13 12:33:31 +05302464 xdma_disable_allclks(xdev);
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302465
2466 return 0;
2467}
2468
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302469static struct platform_driver xilinx_vdma_driver = {
2470 .driver = {
2471 .name = "xilinx-vdma",
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302472 .of_match_table = xilinx_dma_of_ids,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302473 },
Kedareswara rao Appana42c1a2e2016-04-07 10:59:41 +05302474 .probe = xilinx_dma_probe,
2475 .remove = xilinx_dma_remove,
Srikanth Thokala9cd43602014-04-23 20:23:26 +05302476};
2477
2478module_platform_driver(xilinx_vdma_driver);
2479
2480MODULE_AUTHOR("Xilinx, Inc.");
2481MODULE_DESCRIPTION("Xilinx VDMA driver");
2482MODULE_LICENSE("GPL v2");