Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301/*
2 * Applied Micro X-Gene SoC DMA engine Driver
3 *
4 * Copyright (c) 2015, Applied Micro Circuits Corporation
5 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
6 * Loc Ho <lho@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 *
21 * NOTE: PM support is currently not available.
22 */
23
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +053024#include <linux/acpi.h>
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +053025#include <linux/clk.h>
26#include <linux/delay.h>
27#include <linux/dma-mapping.h>
28#include <linux/dmaengine.h>
29#include <linux/dmapool.h>
30#include <linux/interrupt.h>
31#include <linux/io.h>
32#include <linux/module.h>
33#include <linux/of_device.h>
34
35#include "dmaengine.h"
36
/* X-Gene DMA ring csr registers and bit definitions */
38#define XGENE_DMA_RING_CONFIG 0x04
39#define XGENE_DMA_RING_ENABLE BIT(31)
40#define XGENE_DMA_RING_ID 0x08
41#define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31))
42#define XGENE_DMA_RING_ID_BUF 0x0C
43#define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21))
44#define XGENE_DMA_RING_THRESLD0_SET1 0x30
45#define XGENE_DMA_RING_THRESLD0_SET1_VAL 0X64
46#define XGENE_DMA_RING_THRESLD1_SET1 0x34
47#define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8
48#define XGENE_DMA_RING_HYSTERESIS 0x68
49#define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF
50#define XGENE_DMA_RING_STATE 0x6C
51#define XGENE_DMA_RING_STATE_WR_BASE 0x70
52#define XGENE_DMA_RING_NE_INT_MODE 0x017C
53#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
54 ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
55#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
56 ((m) &= (~BIT(31 - (v))))
57#define XGENE_DMA_RING_CLKEN 0xC208
58#define XGENE_DMA_RING_SRST 0xC200
59#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
60#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
61#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
62#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
63#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
64#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
65#define XGENE_DMA_RING_CMD_OFFSET 0x2C
66#define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6)
67#define XGENE_DMA_RING_COHERENT_SET(m) \
68 (((u32 *)(m))[2] |= BIT(4))
69#define XGENE_DMA_RING_ADDRL_SET(m, v) \
70 (((u32 *)(m))[2] |= (((v) >> 8) << 5))
71#define XGENE_DMA_RING_ADDRH_SET(m, v) \
72 (((u32 *)(m))[3] |= ((v) >> 35))
73#define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
74 (((u32 *)(m))[3] |= BIT(19))
75#define XGENE_DMA_RING_SIZE_SET(m, v) \
76 (((u32 *)(m))[3] |= ((v) << 23))
77#define XGENE_DMA_RING_RECOMBBUF_SET(m) \
78 (((u32 *)(m))[3] |= BIT(27))
79#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
80 (((u32 *)(m))[3] |= (0x7 << 28))
81#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
82 (((u32 *)(m))[4] |= 0x3)
83#define XGENE_DMA_RING_SELTHRSH_SET(m) \
84 (((u32 *)(m))[4] |= BIT(3))
85#define XGENE_DMA_RING_TYPE_SET(m, v) \
86 (((u32 *)(m))[4] |= ((v) << 19))
87
88/* X-Gene DMA device csr registers and bit definitions */
89#define XGENE_DMA_IPBRR 0x0
90#define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF)
91#define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3)
92#define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3)
93#define XGENE_DMA_GCR 0x10
94#define XGENE_DMA_CH_SETUP(v) \
95 ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
96#define XGENE_DMA_ENABLE(v) ((v) |= BIT(31))
97#define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31))
98#define XGENE_DMA_RAID6_CONT 0x14
99#define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24)
100#define XGENE_DMA_INT 0x70
101#define XGENE_DMA_INT_MASK 0x74
102#define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF
103#define XGENE_DMA_INT_ALL_UNMASK 0x0
104#define XGENE_DMA_INT_MASK_SHIFT 0x14
105#define XGENE_DMA_RING_INT0_MASK 0x90A0
106#define XGENE_DMA_RING_INT1_MASK 0x90A8
107#define XGENE_DMA_RING_INT2_MASK 0x90B0
108#define XGENE_DMA_RING_INT3_MASK 0x90B8
109#define XGENE_DMA_RING_INT4_MASK 0x90C0
110#define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0
111#define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF
112#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
113#define XGENE_DMA_BLK_MEM_RDY 0xD074
114#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
115
/* X-Gene SoC EFUSE csr register and bit definition */
117#define XGENE_SOC_JTAG1_SHADOW 0x18
118#define XGENE_DMA_PQ_DISABLE_MASK BIT(13)
119
120/* X-Gene DMA Descriptor format */
121#define XGENE_DMA_DESC_NV_BIT BIT_ULL(50)
122#define XGENE_DMA_DESC_IN_BIT BIT_ULL(55)
123#define XGENE_DMA_DESC_C_BIT BIT_ULL(63)
124#define XGENE_DMA_DESC_DR_BIT BIT_ULL(61)
125#define XGENE_DMA_DESC_ELERR_POS 46
126#define XGENE_DMA_DESC_RTYPE_POS 56
127#define XGENE_DMA_DESC_LERR_POS 60
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530128#define XGENE_DMA_DESC_BUFLEN_POS 48
129#define XGENE_DMA_DESC_HOENQ_NUM_POS 48
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530130#define XGENE_DMA_DESC_ELERR_RD(m) \
131 (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
132#define XGENE_DMA_DESC_LERR_RD(m) \
133 (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
134#define XGENE_DMA_DESC_STATUS(elerr, lerr) \
135 (((elerr) << 4) | (lerr))
136
137/* X-Gene DMA descriptor empty s/w signature */
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530138#define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530139
140/* X-Gene DMA configurable parameters defines */
141#define XGENE_DMA_RING_NUM 512
142#define XGENE_DMA_BUFNUM 0x0
143#define XGENE_DMA_CPU_BUFNUM 0x18
144#define XGENE_DMA_RING_OWNER_DMA 0x03
145#define XGENE_DMA_RING_OWNER_CPU 0x0F
146#define XGENE_DMA_RING_TYPE_REGULAR 0x01
147#define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */
148#define XGENE_DMA_RING_NUM_CONFIG 5
149#define XGENE_DMA_MAX_CHANNEL 4
150#define XGENE_DMA_XOR_CHANNEL 0
151#define XGENE_DMA_PQ_CHANNEL 1
152#define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
153#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530154#define XGENE_DMA_MAX_XOR_SRC 5
155#define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530156#define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530157
158/* X-Gene DMA descriptor error codes */
159#define ERR_DESC_AXI 0x01
160#define ERR_BAD_DESC 0x02
161#define ERR_READ_DATA_AXI 0x03
162#define ERR_WRITE_DATA_AXI 0x04
163#define ERR_FBP_TIMEOUT 0x05
164#define ERR_ECC 0x06
165#define ERR_DIFF_SIZE 0x08
166#define ERR_SCT_GAT_LEN 0x09
167#define ERR_CRC_ERR 0x11
168#define ERR_CHKSUM 0x12
169#define ERR_DIF 0x13
170
171/* X-Gene DMA error interrupt codes */
172#define ERR_DIF_SIZE_INT 0x0
173#define ERR_GS_ERR_INT 0x1
174#define ERR_FPB_TIMEO_INT 0x2
175#define ERR_WFIFO_OVF_INT 0x3
176#define ERR_RFIFO_OVF_INT 0x4
177#define ERR_WR_TIMEO_INT 0x5
178#define ERR_RD_TIMEO_INT 0x6
179#define ERR_WR_ERR_INT 0x7
180#define ERR_RD_ERR_INT 0x8
181#define ERR_BAD_DESC_INT 0x9
182#define ERR_DESC_DST_INT 0xA
183#define ERR_DESC_SRC_INT 0xB
184
185/* X-Gene DMA flyby operation code */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530186#define FLYBY_2SRC_XOR 0x80
187#define FLYBY_3SRC_XOR 0x90
188#define FLYBY_4SRC_XOR 0xA0
189#define FLYBY_5SRC_XOR 0xB0
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530190
191/* X-Gene DMA SW descriptor flags */
192#define XGENE_DMA_FLAG_64B_DESC BIT(0)
193
194/* Define to dump X-Gene DMA descriptor */
195#define XGENE_DMA_DESC_DUMP(desc, m) \
196 print_hex_dump(KERN_ERR, (m), \
197 DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
198
199#define to_dma_desc_sw(tx) \
200 container_of(tx, struct xgene_dma_desc_sw, tx)
201#define to_dma_chan(dchan) \
202 container_of(dchan, struct xgene_dma_chan, dma_chan)
203
204#define chan_dbg(chan, fmt, arg...) \
205 dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
206#define chan_err(chan, fmt, arg...) \
207 dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
208
209struct xgene_dma_desc_hw {
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530210 __le64 m0;
211 __le64 m1;
212 __le64 m2;
213 __le64 m3;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530214};
215
216enum xgene_dma_ring_cfgsize {
217 XGENE_DMA_RING_CFG_SIZE_512B,
218 XGENE_DMA_RING_CFG_SIZE_2KB,
219 XGENE_DMA_RING_CFG_SIZE_16KB,
220 XGENE_DMA_RING_CFG_SIZE_64KB,
221 XGENE_DMA_RING_CFG_SIZE_512KB,
222 XGENE_DMA_RING_CFG_SIZE_INVALID
223};
224
225struct xgene_dma_ring {
226 struct xgene_dma *pdma;
227 u8 buf_num;
228 u16 id;
229 u16 num;
230 u16 head;
231 u16 owner;
232 u16 slots;
233 u16 dst_ring_num;
234 u32 size;
235 void __iomem *cmd;
236 void __iomem *cmd_base;
237 dma_addr_t desc_paddr;
238 u32 state[XGENE_DMA_RING_NUM_CONFIG];
239 enum xgene_dma_ring_cfgsize cfgsize;
240 union {
241 void *desc_vaddr;
242 struct xgene_dma_desc_hw *desc_hw;
243 };
244};
245
246struct xgene_dma_desc_sw {
247 struct xgene_dma_desc_hw desc1;
248 struct xgene_dma_desc_hw desc2;
249 u32 flags;
250 struct list_head node;
251 struct list_head tx_list;
252 struct dma_async_tx_descriptor tx;
253};
254
255/**
256 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
257 * @dma_chan: dmaengine channel object member
258 * @pdma: X-Gene DMA device structure reference
259 * @dev: struct device reference for dma mapping api
260 * @id: raw id of this channel
261 * @rx_irq: channel IRQ
262 * @name: name of X-Gene DMA channel
263 * @lock: serializes enqueue/dequeue operations to the descriptor pool
 * @pending: number of transaction requests pushed to the DMA controller for
 * execution, but still awaiting completion
 * @max_outstanding: max number of outstanding requests we can push to channel
267 * @ld_pending: descriptors which are queued to run, but have not yet been
268 * submitted to the hardware for execution
 * @ld_running: descriptors which are currently being executed by the hardware
270 * @ld_completed: descriptors which have finished execution by the hardware.
271 * These descriptors have already had their cleanup actions run. They
272 * are waiting for the ACK bit to be set by the async tx API.
273 * @desc_pool: descriptor pool for DMA operations
 * @tasklet: bottom half where completed descriptors are cleaned up
 * @tx_ring: transmit ring descriptor that we use to prepare actual
 * descriptors for further execution
277 * @rx_ring: receive ring descriptor that we use to get completed DMA
278 * descriptors during cleanup time
279 */
280struct xgene_dma_chan {
281 struct dma_chan dma_chan;
282 struct xgene_dma *pdma;
283 struct device *dev;
284 int id;
285 int rx_irq;
Dan Carpentered1f0412015-04-09 12:05:04 +0300286 char name[10];
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530287 spinlock_t lock;
288 int pending;
289 int max_outstanding;
290 struct list_head ld_pending;
291 struct list_head ld_running;
292 struct list_head ld_completed;
293 struct dma_pool *desc_pool;
294 struct tasklet_struct tasklet;
295 struct xgene_dma_ring tx_ring;
296 struct xgene_dma_ring rx_ring;
297};
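
/*
 * Illustrative summary (not from hardware documentation, derived from the
 * code below): a software descriptor moves through three per-channel lists
 * during its lifetime:
 *
 *	prep_*()	-> allocated from desc_pool, linked on tx_list
 *	tx_submit()	-> spliced onto ld_pending
 *	issue_pending()	-> pushed to the tx ring, moved to ld_running
 *	ring interrupt	-> completion handled, moved to ld_completed
 *	async_tx ack	-> returned to desc_pool
 */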
298
299/**
300 * struct xgene_dma - internal representation of an X-Gene DMA device
 * @dev: struct device reference
 * @clk: DMA engine clock reference
 * @err_irq: DMA error irq number
302 * @ring_num: start id number for DMA ring
303 * @csr_dma: base for DMA register access
304 * @csr_ring: base for DMA ring register access
305 * @csr_ring_cmd: base for DMA ring command register access
306 * @csr_efuse: base for efuse register access
307 * @dma_dev: embedded struct dma_device
308 * @chan: reference to X-Gene DMA channels
309 */
310struct xgene_dma {
311 struct device *dev;
312 struct clk *clk;
313 int err_irq;
314 int ring_num;
315 void __iomem *csr_dma;
316 void __iomem *csr_ring;
317 void __iomem *csr_ring_cmd;
318 void __iomem *csr_efuse;
319 struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
320 struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
321};
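
/*
 * Note (summary of the registration logic further below): each of the
 * XGENE_DMA_MAX_CHANNEL channels is exported as its own struct dma_device
 * with a single dma_chan, so XOR/PQ capabilities can be advertised per
 * channel rather than per controller.
 */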
322
323static const char * const xgene_dma_desc_err[] = {
324 [ERR_DESC_AXI] = "AXI error when reading src/dst link list",
325 [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
326 [ERR_READ_DATA_AXI] = "AXI error when reading data",
327 [ERR_WRITE_DATA_AXI] = "AXI error when writing data",
328 [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
329 [ERR_ECC] = "ECC double bit error",
330 [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
331 [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
332 [ERR_CRC_ERR] = "CRC error",
333 [ERR_CHKSUM] = "Checksum error",
334 [ERR_DIF] = "DIF error",
335};
336
337static const char * const xgene_dma_err[] = {
338 [ERR_DIF_SIZE_INT] = "DIF size error",
339 [ERR_GS_ERR_INT] = "Gather scatter not same size error",
340 [ERR_FPB_TIMEO_INT] = "Free pool time out error",
341 [ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
342 [ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
343 [ERR_WR_TIMEO_INT] = "Write time out error",
344 [ERR_RD_TIMEO_INT] = "Read time out error",
345 [ERR_WR_ERR_INT] = "HBF bus write error",
346 [ERR_RD_ERR_INT] = "HBF bus read error",
347 [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
348 [ERR_DESC_DST_INT] = "HFB reading dst link address error",
349 [ERR_DESC_SRC_INT] = "HFB reading src link address error",
350};
351
352static bool is_pq_enabled(struct xgene_dma *pdma)
353{
354 u32 val;
355
356 val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
357 return !(val & XGENE_DMA_PQ_DISABLE_MASK);
358}
359
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530360static u64 xgene_dma_encode_len(size_t len)
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530361{
362 return (len < XGENE_DMA_MAX_BYTE_CNT) ?
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530363 ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
364 XGENE_DMA_16K_BUFFER_LEN_CODE;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530365}
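
/*
 * Example (illustrative only): a 0x1000 byte buffer is encoded as
 * 0x1000ULL << XGENE_DMA_DESC_BUFLEN_POS, while a full 16 KB chunk
 * (XGENE_DMA_MAX_BYTE_CNT) is represented by the special length code
 * XGENE_DMA_16K_BUFFER_LEN_CODE instead of its byte count.
 */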
366
367static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
368{
369 static u8 flyby_type[] = {
370 FLYBY_2SRC_XOR, /* Dummy */
371 FLYBY_2SRC_XOR, /* Dummy */
372 FLYBY_2SRC_XOR,
373 FLYBY_3SRC_XOR,
374 FLYBY_4SRC_XOR,
375 FLYBY_5SRC_XOR
376 };
377
378 return flyby_type[src_cnt];
379}
380
381static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
382{
383 u32 __iomem *cmd_base = ring->cmd_base;
384 u32 ring_state = ioread32(&cmd_base[1]);
385
386 return XGENE_DMA_RING_DESC_CNT(ring_state);
387}
388
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530389static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530390 dma_addr_t *paddr)
391{
392 size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
393 *len : XGENE_DMA_MAX_BYTE_CNT;
394
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530395 *ext8 |= cpu_to_le64(*paddr);
396 *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530397 *len -= nbytes;
398 *paddr += nbytes;
399}
400
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530401static void xgene_dma_invalidate_buffer(__le64 *ext8)
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530402{
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530403 *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530404}
405
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530406static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530407{
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530408 switch (idx) {
409 case 0:
410 return &desc->m1;
411 case 1:
412 return &desc->m0;
413 case 2:
414 return &desc->m3;
415 case 3:
416 return &desc->m2;
417 default:
418 pr_err("Invalid dma descriptor index\n");
419 }
420
421 return NULL;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530422}
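
/*
 * Note (derived from the lookup above): the 2nd to 5th source buffers live
 * in the extended (2nd) descriptor, and indices 0..3 map to fields m1, m0,
 * m3, m2 respectively, i.e. the 64-bit words of each pair are swapped.
 */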
423
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530424static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
425 u16 dst_ring_num)
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530426{
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530427 desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
428 desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
429 XGENE_DMA_DESC_RTYPE_POS);
430 desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
431 desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
432 XGENE_DMA_DESC_HOENQ_NUM_POS);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530433}
434
435static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
436 struct xgene_dma_desc_sw *desc_sw,
437 dma_addr_t dst, dma_addr_t src,
438 size_t len)
439{
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530440 struct xgene_dma_desc_hw *desc1, *desc2;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530441 int i;
442
443 /* Get 1st descriptor */
444 desc1 = &desc_sw->desc1;
445 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
446
447 /* Set destination address */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530448 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
449 desc1->m3 |= cpu_to_le64(dst);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530450
451 /* Set 1st source address */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530452 xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530453
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530454 if (!len)
455 return;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530456
457 /*
458 * We need to split this source buffer,
459 * and need to use 2nd descriptor
460 */
461 desc2 = &desc_sw->desc2;
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530462 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530463
464 /* Set 2nd to 5th source address */
465 for (i = 0; i < 4 && len; i++)
466 xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
467 &len, &src);
468
469 /* Invalidate unused source address field */
470 for (; i < 4; i++)
471 xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
472
	/* Update the flag to note that we have prepared a 64B descriptor */
474 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530475}
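
/*
 * Sizing note (derived from the helpers above): desc1 carries the first
 * XGENE_DMA_MAX_BYTE_CNT (16 KB) chunk and desc2 carries up to four more,
 * so a single sw descriptor covers at most 5 x 16 KB =
 * XGENE_DMA_MAX_64B_DESC_BYTE_CNT (80 KB); the prep routines split larger
 * requests across multiple sw descriptors.
 */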
476
477static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
478 struct xgene_dma_desc_sw *desc_sw,
479 dma_addr_t *dst, dma_addr_t *src,
480 u32 src_cnt, size_t *nbytes,
481 const u8 *scf)
482{
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530483 struct xgene_dma_desc_hw *desc1, *desc2;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530484 size_t len = *nbytes;
485 int i;
486
487 desc1 = &desc_sw->desc1;
488 desc2 = &desc_sw->desc2;
489
490 /* Initialize DMA descriptor */
491 xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
492
493 /* Set destination address */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530494 desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
495 desc1->m3 |= cpu_to_le64(*dst);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530496
	/* We have multiple source addresses, so we need to set the NV bit */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530498 desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530499
500 /* Set flyby opcode */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530501 desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530502
503 /* Set 1st to 5th source addresses */
504 for (i = 0; i < src_cnt; i++) {
505 len = *nbytes;
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530506 xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530507 xgene_dma_lookup_ext8(desc2, i - 1),
508 &len, &src[i]);
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530509 desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530510 }
511
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530512 /* Update meta data */
513 *nbytes = len;
514 *dst += XGENE_DMA_MAX_BYTE_CNT;
515
	/* We always need a 64B descriptor to perform xor or pq operations */
517 desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
518}
519
520static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
521{
522 struct xgene_dma_desc_sw *desc;
523 struct xgene_dma_chan *chan;
524 dma_cookie_t cookie;
525
526 if (unlikely(!tx))
527 return -EINVAL;
528
529 chan = to_dma_chan(tx->chan);
530 desc = to_dma_desc_sw(tx);
531
532 spin_lock_bh(&chan->lock);
533
534 cookie = dma_cookie_assign(tx);
535
536 /* Add this transaction list onto the tail of the pending queue */
537 list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
538
539 spin_unlock_bh(&chan->lock);
540
541 return cookie;
542}
543
544static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
545 struct xgene_dma_desc_sw *desc)
546{
547 list_del(&desc->node);
548 chan_dbg(chan, "LD %p free\n", desc);
549 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
550}
551
552static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
553 struct xgene_dma_chan *chan)
554{
555 struct xgene_dma_desc_sw *desc;
556 dma_addr_t phys;
557
558 desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
559 if (!desc) {
560 chan_err(chan, "Failed to allocate LDs\n");
561 return NULL;
562 }
563
564 memset(desc, 0, sizeof(*desc));
565
566 INIT_LIST_HEAD(&desc->tx_list);
567 desc->tx.phys = phys;
568 desc->tx.tx_submit = xgene_dma_tx_submit;
569 dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
570
571 chan_dbg(chan, "LD %p allocated\n", desc);
572
573 return desc;
574}
575
576/**
 * xgene_dma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
579 * @chan: X-Gene DMA channel
580 *
581 * This function is used on all completed and acked descriptors.
582 */
583static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
584{
585 struct xgene_dma_desc_sw *desc, *_desc;
586
587 /* Run the callback for each descriptor, in order */
588 list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
589 if (async_tx_test_ack(&desc->tx))
590 xgene_dma_clean_descriptor(chan, desc);
591 }
592}
593
594/**
595 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
596 * @chan: X-Gene DMA channel
597 * @desc: descriptor to cleanup and free
598 *
599 * This function is used on a descriptor which has been executed by the DMA
600 * controller. It will run any callbacks, submit any dependencies.
601 */
602static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
603 struct xgene_dma_desc_sw *desc)
604{
605 struct dma_async_tx_descriptor *tx = &desc->tx;
606
607 /*
	 * If this is not the last transaction in the group, there is no
	 * need to complete the cookie or run any callback, as this is not
	 * the tx descriptor that was returned to the caller of this DMA
	 * request
612 */
613
614 if (tx->cookie == 0)
615 return;
616
617 dma_cookie_complete(tx);
618
619 /* Run the link descriptor callback function */
620 if (tx->callback)
621 tx->callback(tx->callback_param);
622
623 dma_descriptor_unmap(tx);
624
625 /* Run any dependencies */
626 dma_run_dependencies(tx);
627}
628
629/**
630 * xgene_dma_clean_running_descriptor - move the completed descriptor from
631 * ld_running to ld_completed
632 * @chan: X-Gene DMA channel
633 * @desc: the descriptor which is completed
634 *
635 * Free the descriptor directly if acked by async_tx api,
636 * else move it to queue ld_completed.
637 */
638static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
639 struct xgene_dma_desc_sw *desc)
640{
641 /* Remove from the list of running transactions */
642 list_del(&desc->node);
643
644 /*
645 * the client is allowed to attach dependent operations
646 * until 'ack' is set
647 */
648 if (!async_tx_test_ack(&desc->tx)) {
649 /*
650 * Move this descriptor to the list of descriptors which is
651 * completed, but still awaiting the 'ack' bit to be set.
652 */
653 list_add_tail(&desc->node, &chan->ld_completed);
654 return;
655 }
656
657 chan_dbg(chan, "LD %p free\n", desc);
658 dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
659}
660
661static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
662 struct xgene_dma_desc_sw *desc_sw)
663{
664 struct xgene_dma_desc_hw *desc_hw;
665
	/* Check if we can push more descriptors to hw for execution */
667 if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
668 return -EBUSY;
669
670 /* Get hw descriptor from DMA tx ring */
671 desc_hw = &ring->desc_hw[ring->head];
672
673 /*
	 * Advance the head so it points to the next
	 * descriptor for the next submission
676 */
677 if (++ring->head == ring->slots)
678 ring->head = 0;
679
680 /* Copy prepared sw descriptor data to hw descriptor */
681 memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
682
683 /*
684 * Check if we have prepared 64B descriptor,
685 * in this case we need one more hw descriptor
686 */
687 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
688 desc_hw = &ring->desc_hw[ring->head];
689
690 if (++ring->head == ring->slots)
691 ring->head = 0;
692
693 memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
694 }
695
696 /* Notify the hw that we have descriptor ready for execution */
697 iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
698 2 : 1, ring->cmd);
699
700 return 0;
701}
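
/*
 * Note (derived from the code above): a 32 B sw descriptor occupies one
 * ring slot and a 64 B (extended) descriptor occupies two, which is why
 * the ring command register is written with either 1 or 2.
 */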
702
703/**
704 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
705 * @chan : X-Gene DMA channel
706 *
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530707 * LOCKING: must hold chan->lock
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530708 */
709static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
710{
711 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
712 int ret;
713
714 /*
715 * If the list of pending descriptors is empty, then we
716 * don't need to do any work at all
717 */
718 if (list_empty(&chan->ld_pending)) {
719 chan_dbg(chan, "No pending LDs\n");
720 return;
721 }
722
723 /*
724 * Move elements from the queue of pending transactions onto the list
725 * of running transactions and push it to hw for further executions
726 */
727 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
728 /*
		 * Check if we have already pushed the maximum number of
		 * transactions that the hw can accept; if so, stop here and
		 * push the remaining elements from the pending ld queue once
		 * some of the already pushed descriptors have completed
733 */
734 if (chan->pending >= chan->max_outstanding)
735 return;
736
737 ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
738 if (ret)
739 return;
740
741 /*
742 * Delete this element from ld pending queue and append it to
743 * ld running queue
744 */
745 list_move_tail(&desc_sw->node, &chan->ld_running);
746
747 /* Increment the pending transaction count */
748 chan->pending++;
749 }
750}
751
752/**
 * xgene_dma_cleanup_descriptors - clean up link descriptors which have completed
 * and move them to ld_completed, to be freed once the 'ack' flag is set
755 * @chan: X-Gene DMA channel
756 *
757 * This function is used on descriptors which have been executed by the DMA
758 * controller. It will run any callbacks, submit any dependencies, then
759 * free these descriptors if flag 'ack' is set.
760 */
761static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
762{
763 struct xgene_dma_ring *ring = &chan->rx_ring;
764 struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
765 struct xgene_dma_desc_hw *desc_hw;
Rameshwar Prasad Sahu005ce702015-08-21 14:33:34 +0530766 struct list_head ld_completed;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530767 u8 status;
768
Rameshwar Prasad Sahu005ce702015-08-21 14:33:34 +0530769 INIT_LIST_HEAD(&ld_completed);
770
771 spin_lock_bh(&chan->lock);
772
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530773 /* Clean already completed and acked descriptors */
774 xgene_dma_clean_completed_descriptor(chan);
775
Rameshwar Prasad Sahu005ce702015-08-21 14:33:34 +0530776 /* Move all completed descriptors to ld completed queue, in order */
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530777 list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
778 /* Get subsequent hw descriptor from DMA rx ring */
779 desc_hw = &ring->desc_hw[ring->head];
780
781 /* Check if this descriptor has been completed */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530782 if (unlikely(le64_to_cpu(desc_hw->m0) ==
783 XGENE_DMA_DESC_EMPTY_SIGNATURE))
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530784 break;
785
786 if (++ring->head == ring->slots)
787 ring->head = 0;
788
789 /* Check if we have any error with DMA transactions */
790 status = XGENE_DMA_DESC_STATUS(
791 XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
792 desc_hw->m0)),
793 XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
794 desc_hw->m0)));
795 if (status) {
796 /* Print the DMA error type */
797 chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
798
799 /*
			 * We have a DMA transaction error here. Dump the DMA
			 * Tx and Rx descriptors for this request
			 */
802 XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
803 "X-Gene DMA TX DESC1: ");
804
805 if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
806 XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
807 "X-Gene DMA TX DESC2: ");
808
809 XGENE_DMA_DESC_DUMP(desc_hw,
810 "X-Gene DMA RX ERR DESC: ");
811 }
812
813 /* Notify the hw about this completed descriptor */
814 iowrite32(-1, ring->cmd);
815
816 /* Mark this hw descriptor as processed */
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530817 desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530818
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530819 /*
820 * Decrement the pending transaction count
821 * as we have processed one
822 */
823 chan->pending--;
Rameshwar Prasad Sahu005ce702015-08-21 14:33:34 +0530824
825 /*
826 * Delete this node from ld running queue and append it to
827 * ld completed queue for further processing
828 */
829 list_move_tail(&desc_sw->node, &ld_completed);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530830 }
831
832 /*
833 * Start any pending transactions automatically
834 * In the ideal case, we keep the DMA controller busy while we go
835 * ahead and free the descriptors below.
836 */
837 xgene_chan_xfer_ld_pending(chan);
Rameshwar Prasad Sahu005ce702015-08-21 14:33:34 +0530838
839 spin_unlock_bh(&chan->lock);
840
841 /* Run the callback for each descriptor, in order */
842 list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
843 xgene_dma_run_tx_complete_actions(chan, desc_sw);
844 xgene_dma_clean_running_descriptor(chan, desc_sw);
845 }
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530846}
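
/*
 * Completion protocol in short (derived from the cleanup loop above): the
 * hw writes one completion descriptor per request into the Rx ring; the
 * driver acknowledges it by writing -1 to the ring command register and
 * re-marks the slot with XGENE_DMA_DESC_EMPTY_SIGNATURE so it can be
 * reused.
 */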
847
848static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
849{
850 struct xgene_dma_chan *chan = to_dma_chan(dchan);
851
852 /* Has this channel already been allocated? */
853 if (chan->desc_pool)
854 return 1;
855
856 chan->desc_pool = dma_pool_create(chan->name, chan->dev,
857 sizeof(struct xgene_dma_desc_sw),
858 0, 0);
859 if (!chan->desc_pool) {
860 chan_err(chan, "Failed to allocate descriptor pool\n");
861 return -ENOMEM;
862 }
863
864 chan_dbg(chan, "Allocate descripto pool\n");
865
866 return 1;
867}
868
869/**
870 * xgene_dma_free_desc_list - Free all descriptors in a queue
871 * @chan: X-Gene DMA channel
872 * @list: the list to free
873 *
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530874 * LOCKING: must hold chan->lock
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530875 */
876static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
877 struct list_head *list)
878{
879 struct xgene_dma_desc_sw *desc, *_desc;
880
881 list_for_each_entry_safe(desc, _desc, list, node)
882 xgene_dma_clean_descriptor(chan, desc);
883}
884
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530885static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
886{
887 struct xgene_dma_chan *chan = to_dma_chan(dchan);
888
889 chan_dbg(chan, "Free all resources\n");
890
891 if (!chan->desc_pool)
892 return;
893
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530894 /* Process all running descriptor */
895 xgene_dma_cleanup_descriptors(chan);
896
Rameshwar Prasad Sahu005ce702015-08-21 14:33:34 +0530897 spin_lock_bh(&chan->lock);
898
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530899 /* Clean all link descriptor queues */
900 xgene_dma_free_desc_list(chan, &chan->ld_pending);
901 xgene_dma_free_desc_list(chan, &chan->ld_running);
902 xgene_dma_free_desc_list(chan, &chan->ld_completed);
903
904 spin_unlock_bh(&chan->lock);
905
906 /* Delete this channel DMA pool */
907 dma_pool_destroy(chan->desc_pool);
908 chan->desc_pool = NULL;
909}
910
911static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
912 struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
913 size_t len, unsigned long flags)
914{
915 struct xgene_dma_desc_sw *first = NULL, *new;
916 struct xgene_dma_chan *chan;
917 size_t copy;
918
919 if (unlikely(!dchan || !len))
920 return NULL;
921
922 chan = to_dma_chan(dchan);
923
924 do {
925 /* Allocate the link descriptor from DMA pool */
926 new = xgene_dma_alloc_descriptor(chan);
927 if (!new)
928 goto fail;
929
930 /* Create the largest transaction possible */
931 copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
932
933 /* Prepare DMA descriptor */
934 xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
935
936 if (!first)
937 first = new;
938
939 new->tx.cookie = 0;
940 async_tx_ack(&new->tx);
941
942 /* Update metadata */
943 len -= copy;
944 dst += copy;
945 src += copy;
946
947 /* Insert the link descriptor to the LD ring */
948 list_add_tail(&new->node, &first->tx_list);
949 } while (len);
950
951 new->tx.flags = flags; /* client is in control of this ack */
952 new->tx.cookie = -EBUSY;
953 list_splice(&first->tx_list, &new->tx_list);
954
955 return &new->tx;
956
957fail:
958 if (!first)
959 return NULL;
960
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +0530961 xgene_dma_free_desc_list(chan, &first->tx_list);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +0530962 return NULL;
963}
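
/*
 * Minimal client-side sketch (illustrative only, not part of this driver)
 * of how a dmaengine user could drive the memcpy path above; the channel
 * and the DMA-mapped src/dst addresses are assumed to have been obtained
 * elsewhere:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
 *	       DMA_COMPLETE)
 *		cpu_relax();
 */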
964
965static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
966 struct dma_chan *dchan, struct scatterlist *dst_sg,
967 u32 dst_nents, struct scatterlist *src_sg,
968 u32 src_nents, unsigned long flags)
969{
970 struct xgene_dma_desc_sw *first = NULL, *new = NULL;
971 struct xgene_dma_chan *chan;
972 size_t dst_avail, src_avail;
973 dma_addr_t dst, src;
974 size_t len;
975
976 if (unlikely(!dchan))
977 return NULL;
978
979 if (unlikely(!dst_nents || !src_nents))
980 return NULL;
981
982 if (unlikely(!dst_sg || !src_sg))
983 return NULL;
984
985 chan = to_dma_chan(dchan);
986
987 /* Get prepared for the loop */
988 dst_avail = sg_dma_len(dst_sg);
989 src_avail = sg_dma_len(src_sg);
990 dst_nents--;
991 src_nents--;
992
993 /* Run until we are out of scatterlist entries */
994 while (true) {
995 /* Create the largest transaction possible */
996 len = min_t(size_t, src_avail, dst_avail);
997 len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
998 if (len == 0)
999 goto fetch;
1000
1001 dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
1002 src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
1003
1004 /* Allocate the link descriptor from DMA pool */
1005 new = xgene_dma_alloc_descriptor(chan);
1006 if (!new)
1007 goto fail;
1008
1009 /* Prepare DMA descriptor */
1010 xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
1011
1012 if (!first)
1013 first = new;
1014
1015 new->tx.cookie = 0;
1016 async_tx_ack(&new->tx);
1017
1018 /* update metadata */
1019 dst_avail -= len;
1020 src_avail -= len;
1021
1022 /* Insert the link descriptor to the LD ring */
1023 list_add_tail(&new->node, &first->tx_list);
1024
1025fetch:
1026 /* fetch the next dst scatterlist entry */
1027 if (dst_avail == 0) {
1028 /* no more entries: we're done */
1029 if (dst_nents == 0)
1030 break;
1031
1032 /* fetch the next entry: if there are no more: done */
1033 dst_sg = sg_next(dst_sg);
1034 if (!dst_sg)
1035 break;
1036
1037 dst_nents--;
1038 dst_avail = sg_dma_len(dst_sg);
1039 }
1040
1041 /* fetch the next src scatterlist entry */
1042 if (src_avail == 0) {
1043 /* no more entries: we're done */
1044 if (src_nents == 0)
1045 break;
1046
1047 /* fetch the next entry: if there are no more: done */
1048 src_sg = sg_next(src_sg);
1049 if (!src_sg)
1050 break;
1051
1052 src_nents--;
1053 src_avail = sg_dma_len(src_sg);
1054 }
1055 }
1056
1057 if (!new)
1058 return NULL;
1059
1060 new->tx.flags = flags; /* client is in control of this ack */
1061 new->tx.cookie = -EBUSY;
1062 list_splice(&first->tx_list, &new->tx_list);
1063
1064 return &new->tx;
1065fail:
1066 if (!first)
1067 return NULL;
1068
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +05301069 xgene_dma_free_desc_list(chan, &first->tx_list);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301070 return NULL;
1071}
1072
1073static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
1074 struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
1075 u32 src_cnt, size_t len, unsigned long flags)
1076{
1077 struct xgene_dma_desc_sw *first = NULL, *new;
1078 struct xgene_dma_chan *chan;
1079 static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
1080 0x01, 0x01, 0x01, 0x01, 0x01};
1081
1082 if (unlikely(!dchan || !len))
1083 return NULL;
1084
1085 chan = to_dma_chan(dchan);
1086
1087 do {
1088 /* Allocate the link descriptor from DMA pool */
1089 new = xgene_dma_alloc_descriptor(chan);
1090 if (!new)
1091 goto fail;
1092
1093 /* Prepare xor DMA descriptor */
1094 xgene_dma_prep_xor_desc(chan, new, &dst, src,
1095 src_cnt, &len, multi);
1096
1097 if (!first)
1098 first = new;
1099
1100 new->tx.cookie = 0;
1101 async_tx_ack(&new->tx);
1102
1103 /* Insert the link descriptor to the LD ring */
1104 list_add_tail(&new->node, &first->tx_list);
1105 } while (len);
1106
1107 new->tx.flags = flags; /* client is in control of this ack */
1108 new->tx.cookie = -EBUSY;
1109 list_splice(&first->tx_list, &new->tx_list);
1110
1111 return &new->tx;
1112
1113fail:
1114 if (!first)
1115 return NULL;
1116
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +05301117 xgene_dma_free_desc_list(chan, &first->tx_list);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301118 return NULL;
1119}
1120
1121static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
1122 struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
1123 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1124{
1125 struct xgene_dma_desc_sw *first = NULL, *new;
1126 struct xgene_dma_chan *chan;
1127 size_t _len = len;
1128 dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
1129 static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
1130
1131 if (unlikely(!dchan || !len))
1132 return NULL;
1133
1134 chan = to_dma_chan(dchan);
1135
1136 /*
	 * Save the source addresses in a local variable; we may have to
	 * prepare two descriptors to generate P and Q if both are enabled
	 * in the flags by the client
1140 */
1141 memcpy(_src, src, sizeof(*src) * src_cnt);
1142
1143 if (flags & DMA_PREP_PQ_DISABLE_P)
1144 len = 0;
1145
1146 if (flags & DMA_PREP_PQ_DISABLE_Q)
1147 _len = 0;
1148
1149 do {
1150 /* Allocate the link descriptor from DMA pool */
1151 new = xgene_dma_alloc_descriptor(chan);
1152 if (!new)
1153 goto fail;
1154
1155 if (!first)
1156 first = new;
1157
1158 new->tx.cookie = 0;
1159 async_tx_ack(&new->tx);
1160
1161 /* Insert the link descriptor to the LD ring */
1162 list_add_tail(&new->node, &first->tx_list);
1163
1164 /*
1165 * Prepare DMA descriptor to generate P,
1166 * if DMA_PREP_PQ_DISABLE_P flag is not set
1167 */
1168 if (len) {
1169 xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
1170 src_cnt, &len, multi);
1171 continue;
1172 }
1173
1174 /*
1175 * Prepare DMA descriptor to generate Q,
1176 * if DMA_PREP_PQ_DISABLE_Q flag is not set
1177 */
1178 if (_len) {
1179 xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
1180 src_cnt, &_len, scf);
1181 }
1182 } while (len || _len);
1183
1184 new->tx.flags = flags; /* client is in control of this ack */
1185 new->tx.cookie = -EBUSY;
1186 list_splice(&first->tx_list, &new->tx_list);
1187
1188 return &new->tx;
1189
1190fail:
1191 if (!first)
1192 return NULL;
1193
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +05301194 xgene_dma_free_desc_list(chan, &first->tx_list);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301195 return NULL;
1196}
1197
1198static void xgene_dma_issue_pending(struct dma_chan *dchan)
1199{
1200 struct xgene_dma_chan *chan = to_dma_chan(dchan);
1201
1202 spin_lock_bh(&chan->lock);
1203 xgene_chan_xfer_ld_pending(chan);
1204 spin_unlock_bh(&chan->lock);
1205}
1206
1207static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
1208 dma_cookie_t cookie,
1209 struct dma_tx_state *txstate)
1210{
1211 return dma_cookie_status(dchan, cookie, txstate);
1212}
1213
1214static void xgene_dma_tasklet_cb(unsigned long data)
1215{
1216 struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
1217
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301218 /* Run all cleanup for descriptors which have been completed */
1219 xgene_dma_cleanup_descriptors(chan);
1220
1221 /* Re-enable DMA channel IRQ */
1222 enable_irq(chan->rx_irq);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301223}
1224
1225static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
1226{
1227 struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
1228
1229 BUG_ON(!chan);
1230
1231 /*
1232 * Disable DMA channel IRQ until we process completed
1233 * descriptors
1234 */
1235 disable_irq_nosync(chan->rx_irq);
1236
1237 /*
1238 * Schedule the tasklet to handle all cleanup of the current
1239 * transaction. It will start a new transaction if there is
1240 * one pending.
1241 */
1242 tasklet_schedule(&chan->tasklet);
1243
1244 return IRQ_HANDLED;
1245}
1246
1247static irqreturn_t xgene_dma_err_isr(int irq, void *id)
1248{
1249 struct xgene_dma *pdma = (struct xgene_dma *)id;
1250 unsigned long int_mask;
1251 u32 val, i;
1252
1253 val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
1254
1255 /* Clear DMA interrupts */
1256 iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
1257
1258 /* Print DMA error info */
1259 int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
1260 for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
1261 dev_err(pdma->dev,
1262 "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
1263
1264 return IRQ_HANDLED;
1265}
1266
1267static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
1268{
1269 int i;
1270
1271 iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
1272
1273 for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
1274 iowrite32(ring->state[i], ring->pdma->csr_ring +
1275 XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
1276}
1277
1278static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
1279{
1280 memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
1281 xgene_dma_wr_ring_state(ring);
1282}
1283
1284static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
1285{
1286 void *ring_cfg = ring->state;
1287 u64 addr = ring->desc_paddr;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301288 u32 i, val;
1289
1290 ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
1291
1292 /* Clear DMA ring state */
1293 xgene_dma_clr_ring_state(ring);
1294
1295 /* Set DMA ring type */
1296 XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
1297
1298 if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
1299 /* Set recombination buffer and timeout */
1300 XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
1301 XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
1302 XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
1303 }
1304
1305 /* Initialize DMA ring state */
1306 XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
1307 XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
1308 XGENE_DMA_RING_COHERENT_SET(ring_cfg);
1309 XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
1310 XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
1311 XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
1312
1313 /* Write DMA ring configurations */
1314 xgene_dma_wr_ring_state(ring);
1315
1316 /* Set DMA ring id */
1317 iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
1318 ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1319
1320 /* Set DMA ring buffer */
1321 iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
1322 ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1323
1324 if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
1325 return;
1326
1327 /* Set empty signature to DMA Rx ring descriptors */
1328 for (i = 0; i < ring->slots; i++) {
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +05301329 struct xgene_dma_desc_hw *desc;
1330
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301331 desc = &ring->desc_hw[i];
Rameshwar Prasad Sahu6d0767c2015-06-02 14:33:33 +05301332 desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301333 }
1334
1335 /* Enable DMA Rx ring interrupt */
1336 val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1337 XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
1338 iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1339}
1340
1341static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
1342{
1343 u32 ring_id, val;
1344
1345 if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
1346 /* Disable DMA Rx ring interrupt */
1347 val = ioread32(ring->pdma->csr_ring +
1348 XGENE_DMA_RING_NE_INT_MODE);
1349 XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
1350 iowrite32(val, ring->pdma->csr_ring +
1351 XGENE_DMA_RING_NE_INT_MODE);
1352 }
1353
1354 /* Clear DMA ring state */
1355 ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
1356 iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1357
1358 iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1359 xgene_dma_clr_ring_state(ring);
1360}
1361
1362static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
1363{
1364 ring->cmd_base = ring->pdma->csr_ring_cmd +
1365 XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
1366 XGENE_DMA_RING_NUM));
1367
1368 ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
1369}
1370
1371static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
1372 enum xgene_dma_ring_cfgsize cfgsize)
1373{
1374 int size;
1375
1376 switch (cfgsize) {
1377 case XGENE_DMA_RING_CFG_SIZE_512B:
1378 size = 0x200;
1379 break;
1380 case XGENE_DMA_RING_CFG_SIZE_2KB:
1381 size = 0x800;
1382 break;
1383 case XGENE_DMA_RING_CFG_SIZE_16KB:
1384 size = 0x4000;
1385 break;
1386 case XGENE_DMA_RING_CFG_SIZE_64KB:
1387 size = 0x10000;
1388 break;
1389 case XGENE_DMA_RING_CFG_SIZE_512KB:
1390 size = 0x80000;
1391 break;
1392 default:
1393 chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
1394 return -EINVAL;
1395 }
1396
1397 return size;
1398}
1399
1400static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
1401{
1402 /* Clear DMA ring configurations */
1403 xgene_dma_clear_ring(ring);
1404
1405 /* De-allocate DMA ring descriptor */
1406 if (ring->desc_vaddr) {
1407 dma_free_coherent(ring->pdma->dev, ring->size,
1408 ring->desc_vaddr, ring->desc_paddr);
1409 ring->desc_vaddr = NULL;
1410 }
1411}
1412
1413static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
1414{
1415 xgene_dma_delete_ring_one(&chan->rx_ring);
1416 xgene_dma_delete_ring_one(&chan->tx_ring);
1417}
1418
1419static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1420 struct xgene_dma_ring *ring,
1421 enum xgene_dma_ring_cfgsize cfgsize)
1422{
1423 /* Setup DMA ring descriptor variables */
1424 ring->pdma = chan->pdma;
1425 ring->cfgsize = cfgsize;
1426 ring->num = chan->pdma->ring_num++;
1427 ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
1428
1429 ring->size = xgene_dma_get_ring_size(chan, cfgsize);
1430 if (ring->size <= 0)
1431 return ring->size;
1432
1433 /* Allocate memory for DMA ring descriptor */
1434 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
1435 &ring->desc_paddr, GFP_KERNEL);
1436 if (!ring->desc_vaddr) {
1437 chan_err(chan, "Failed to allocate ring desc\n");
1438 return -ENOMEM;
1439 }
1440
1441 /* Configure and enable DMA ring */
1442 xgene_dma_set_ring_cmd(ring);
1443 xgene_dma_setup_ring(ring);
1444
1445 return 0;
1446}
1447
1448static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
1449{
1450 struct xgene_dma_ring *rx_ring = &chan->rx_ring;
1451 struct xgene_dma_ring *tx_ring = &chan->tx_ring;
1452 int ret;
1453
1454 /* Create DMA Rx ring descriptor */
1455 rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
1456 rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
1457
1458 ret = xgene_dma_create_ring_one(chan, rx_ring,
1459 XGENE_DMA_RING_CFG_SIZE_64KB);
1460 if (ret)
1461 return ret;
1462
1463 chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
1464 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
1465
1466 /* Create DMA Tx ring descriptor */
1467 tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
1468 tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
1469
1470 ret = xgene_dma_create_ring_one(chan, tx_ring,
1471 XGENE_DMA_RING_CFG_SIZE_64KB);
1472 if (ret) {
1473 xgene_dma_delete_ring_one(rx_ring);
1474 return ret;
1475 }
1476
1477 tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
1478
1479 chan_dbg(chan,
1480 "Tx ring id 0x%X num %d desc 0x%p\n",
1481 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1482
	/* Set the max number of outstanding requests possible to this channel */
1484 chan->max_outstanding = rx_ring->slots;
1485
1486 return ret;
1487}
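
/*
 * Note (derived from the setup above): each channel uses a pair of rings:
 * the Tx ring (owner DMA) receives work descriptors and the Rx ring
 * (owner CPU) receives completion descriptors; max_outstanding is bounded
 * by the number of Rx ring slots so completions can never overflow it.
 */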
1488
1489static int xgene_dma_init_rings(struct xgene_dma *pdma)
1490{
1491 int ret, i, j;
1492
1493 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1494 ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
1495 if (ret) {
1496 for (j = 0; j < i; j++)
1497 xgene_dma_delete_chan_rings(&pdma->chan[j]);
1498 return ret;
1499 }
1500 }
1501
1502 return ret;
1503}
1504
1505static void xgene_dma_enable(struct xgene_dma *pdma)
1506{
1507 u32 val;
1508
1509 /* Configure and enable DMA engine */
1510 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1511 XGENE_DMA_CH_SETUP(val);
1512 XGENE_DMA_ENABLE(val);
1513 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1514}
1515
1516static void xgene_dma_disable(struct xgene_dma *pdma)
1517{
1518 u32 val;
1519
1520 val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1521 XGENE_DMA_DISABLE(val);
1522 iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1523}
1524
1525static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
1526{
1527 /*
1528 * Mask DMA ring overflow, underflow and
1529 * AXI write/read error interrupts
1530 */
1531 iowrite32(XGENE_DMA_INT_ALL_MASK,
1532 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1533 iowrite32(XGENE_DMA_INT_ALL_MASK,
1534 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1535 iowrite32(XGENE_DMA_INT_ALL_MASK,
1536 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1537 iowrite32(XGENE_DMA_INT_ALL_MASK,
1538 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1539 iowrite32(XGENE_DMA_INT_ALL_MASK,
1540 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1541
1542 /* Mask DMA error interrupts */
1543 iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
1544}
1545
1546static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
1547{
1548 /*
1549 * Unmask DMA ring overflow, underflow and
1550 * AXI write/read error interrupts
1551 */
1552 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1553 pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1554 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1555 pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1556 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1557 pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1558 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1559 pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1560 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1561 pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1562
1563 /* Unmask DMA error interrupts */
1564 iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1565 pdma->csr_dma + XGENE_DMA_INT_MASK);
1566}
1567
1568static void xgene_dma_init_hw(struct xgene_dma *pdma)
1569{
1570 u32 val;
1571
1572 /* Associate DMA ring to corresponding ring HW */
1573 iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
1574 pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
1575
1576 /* Configure RAID6 polynomial control setting */
1577 if (is_pq_enabled(pdma))
1578 iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
1579 pdma->csr_dma + XGENE_DMA_RAID6_CONT);
1580 else
1581 dev_info(pdma->dev, "PQ is disabled in HW\n");
1582
1583 xgene_dma_enable(pdma);
1584 xgene_dma_unmask_interrupts(pdma);
1585
1586 /* Get DMA id and version info */
1587 val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
1588
1589 /* DMA device info */
1590 dev_info(pdma->dev,
1591 "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
1592 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
1593 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
1594}
1595
kbuild test robota3f92e82015-04-02 17:50:56 +08001596static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301597{
1598 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
1599 (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
1600 return 0;
1601
1602 iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
1603 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
1604
1605 /* Bring up memory */
1606 iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1607
1608 /* Force a barrier */
1609 ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1610
1611 /* reset may take up to 1ms */
1612 usleep_range(1000, 1100);
1613
1614 if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
1615 != XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
1616 dev_err(pdma->dev,
1617 "Failed to release ring mngr memory from shutdown\n");
1618 return -ENODEV;
1619 }
1620
1621 /* program threshold set 1 and all hysteresis */
1622 iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
1623 pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
1624 iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
1625 pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
1626 iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
1627 pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
1628
1629 /* Enable QPcore and assign error queue */
1630 iowrite32(XGENE_DMA_RING_ENABLE,
1631 pdma->csr_ring + XGENE_DMA_RING_CONFIG);
1632
1633 return 0;
1634}
1635
1636static int xgene_dma_init_mem(struct xgene_dma *pdma)
1637{
1638 int ret;
1639
1640 ret = xgene_dma_init_ring_mngr(pdma);
1641 if (ret)
1642 return ret;
1643
1644 /* Bring up memory */
1645 iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1646
1647 /* Force a barrier */
1648 ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1649
1650 /* reset may take up to 1ms */
1651 usleep_range(1000, 1100);
1652
1653 if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
1654 != XGENE_DMA_BLK_MEM_RDY_VAL) {
1655 dev_err(pdma->dev,
1656 "Failed to release DMA memory from shutdown\n");
1657 return -ENODEV;
1658 }
1659
1660 return 0;
1661}
1662
1663static int xgene_dma_request_irqs(struct xgene_dma *pdma)
1664{
1665 struct xgene_dma_chan *chan;
1666 int ret, i, j;
1667
1668 /* Register DMA error irq */
1669 ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
1670 0, "dma_error", pdma);
1671 if (ret) {
1672 dev_err(pdma->dev,
1673 "Failed to register error IRQ %d\n", pdma->err_irq);
1674 return ret;
1675 }
1676
1677 /* Register DMA channel rx irq */
1678 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1679 chan = &pdma->chan[i];
1680 ret = devm_request_irq(chan->dev, chan->rx_irq,
1681 xgene_dma_chan_ring_isr,
1682 0, chan->name, chan);
1683 if (ret) {
1684 chan_err(chan, "Failed to register Rx IRQ %d\n",
1685 chan->rx_irq);
1686 devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1687
1688 for (j = 0; j < i; j++) {
1689 chan = &pdma->chan[i];
1690 devm_free_irq(chan->dev, chan->rx_irq, chan);
1691 }
1692
1693 return ret;
1694 }
1695 }
1696
1697 return 0;
1698}
1699
1700static void xgene_dma_free_irqs(struct xgene_dma *pdma)
1701{
1702 struct xgene_dma_chan *chan;
1703 int i;
1704
1705 /* Free DMA device error irq */
1706 devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1707
1708 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1709 chan = &pdma->chan[i];
1710 devm_free_irq(chan->dev, chan->rx_irq, chan);
1711 }
1712}
1713
1714static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
1715 struct dma_device *dma_dev)
1716{
1717 /* Initialize DMA device capability mask */
1718 dma_cap_zero(dma_dev->cap_mask);
1719
1720 /* Set DMA device capability */
1721 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1722 dma_cap_set(DMA_SG, dma_dev->cap_mask);
1723
 1724	/* On the X-Gene SoC, DMA channel 0 supports XOR and channel 1
 1725	 * supports both XOR and PQ. XOR/PQ support on channel 1 can be
 1726	 * disabled in hardware; we detect this by reading the SoC efuse
 1727	 * register.
 1728	 * A hardware erratum causes the DMA engine to hang when channel 0
 1729	 * and channel 1 execute XOR and PQ requests simultaneously, so XOR
 1730	 * is enabled on channel 0 only when XOR/PQ support on channel 1 is
 1731	 * disabled.
 1732	 */
1733 if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
1734 is_pq_enabled(chan->pdma)) {
1735 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1736 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1737 } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
1738 !is_pq_enabled(chan->pdma)) {
1739 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1740 }
1741
1742 /* Set base and prep routines */
1743 dma_dev->dev = chan->dev;
1744 dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
1745 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
1746 dma_dev->device_issue_pending = xgene_dma_issue_pending;
1747 dma_dev->device_tx_status = xgene_dma_tx_status;
1748 dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
1749 dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
1750
1751 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1752 dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
1753 dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
Maxime Ripard77a68e52015-07-20 10:41:32 +02001754 dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301755 }
1756
1757 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1758 dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
1759 dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
Maxime Ripard77a68e52015-07-20 10:41:32 +02001760 dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301761 }
1762}
1763
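/*
 * Initialize a channel's software state and register it as a separate
 * dma_device with the dmaengine framework.
 */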
1764static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
1765{
1766 struct xgene_dma_chan *chan = &pdma->chan[id];
1767 struct dma_device *dma_dev = &pdma->dma_dev[id];
1768 int ret;
1769
1770 chan->dma_chan.device = dma_dev;
1771
1772 spin_lock_init(&chan->lock);
1773 INIT_LIST_HEAD(&chan->ld_pending);
1774 INIT_LIST_HEAD(&chan->ld_running);
1775 INIT_LIST_HEAD(&chan->ld_completed);
1776 tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
1777 (unsigned long)chan);
1778
1779 chan->pending = 0;
1780 chan->desc_pool = NULL;
1781 dma_cookie_init(&chan->dma_chan);
1782
1783 /* Setup dma device capabilities and prep routines */
1784 xgene_dma_set_caps(chan, dma_dev);
1785
1786 /* Initialize DMA device list head */
1787 INIT_LIST_HEAD(&dma_dev->channels);
1788 list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
1789
 1790	/* Register with Linux async DMA framework */
1791 ret = dma_async_device_register(dma_dev);
1792 if (ret) {
 1793		chan_err(chan, "Failed to register async device %d\n", ret);
1794 tasklet_kill(&chan->tasklet);
1795
1796 return ret;
1797 }
1798
1799 /* DMA capability info */
1800 dev_info(pdma->dev,
1801 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
1802 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
1803 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
1804 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
1805 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
1806
1807 return 0;
1808}
1809
1810static int xgene_dma_init_async(struct xgene_dma *pdma)
1811{
1812 int ret, i, j;
1813
 1814	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1815 ret = xgene_dma_async_register(pdma, i);
1816 if (ret) {
1817 for (j = 0; j < i; j++) {
1818 dma_async_device_unregister(&pdma->dma_dev[j]);
1819 tasklet_kill(&pdma->chan[j].tasklet);
1820 }
1821
1822 return ret;
1823 }
1824 }
1825
1826 return ret;
1827}
1828
1829static void xgene_dma_async_unregister(struct xgene_dma *pdma)
1830{
1831 int i;
1832
1833 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1834 dma_async_device_unregister(&pdma->dma_dev[i]);
1835}
1836
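/*
 * Set up per-channel software state (device pointer, channel id, name).
 * Rings and interrupts for each channel are configured later in probe.
 */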
1837static void xgene_dma_init_channels(struct xgene_dma *pdma)
1838{
1839 struct xgene_dma_chan *chan;
1840 int i;
1841
1842 pdma->ring_num = XGENE_DMA_RING_NUM;
1843
1844 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1845 chan = &pdma->chan[i];
1846 chan->dev = pdma->dev;
1847 chan->pdma = pdma;
1848 chan->id = i;
Dan Carpentered1f0412015-04-09 12:05:04 +03001849 snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301850 }
1851}
1852
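/*
 * Map the four CSR regions (DMA, ring, ring command, efuse) and fetch the
 * error IRQ plus one Rx ring IRQ per channel from the platform device.
 */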
1853static int xgene_dma_get_resources(struct platform_device *pdev,
1854 struct xgene_dma *pdma)
1855{
1856 struct resource *res;
1857 int irq, i;
1858
1859 /* Get DMA csr region */
1860 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1861 if (!res) {
1862 dev_err(&pdev->dev, "Failed to get csr region\n");
1863 return -ENXIO;
1864 }
1865
1866 pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
1867 resource_size(res));
Dan Carpenter9c361b12015-04-09 12:03:31 +03001868 if (!pdma->csr_dma) {
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301869 dev_err(&pdev->dev, "Failed to ioremap csr region");
Dan Carpenter9c361b12015-04-09 12:03:31 +03001870 return -ENOMEM;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301871 }
1872
1873 /* Get DMA ring csr region */
1874 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1875 if (!res) {
1876 dev_err(&pdev->dev, "Failed to get ring csr region\n");
1877 return -ENXIO;
1878 }
1879
1880 pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
1881 resource_size(res));
Dan Carpenter9c361b12015-04-09 12:03:31 +03001882 if (!pdma->csr_ring) {
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301883 dev_err(&pdev->dev, "Failed to ioremap ring csr region");
Dan Carpenter9c361b12015-04-09 12:03:31 +03001884 return -ENOMEM;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301885 }
1886
1887 /* Get DMA ring cmd csr region */
1888 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1889 if (!res) {
1890 dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
1891 return -ENXIO;
1892 }
1893
1894 pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
1895 resource_size(res));
Dan Carpenter9c361b12015-04-09 12:03:31 +03001896 if (!pdma->csr_ring_cmd) {
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301897 dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
Dan Carpenter9c361b12015-04-09 12:03:31 +03001898 return -ENOMEM;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301899 }
1900
1901 /* Get efuse csr region */
1902 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1903 if (!res) {
1904 dev_err(&pdev->dev, "Failed to get efuse csr region\n");
1905 return -ENXIO;
1906 }
1907
1908 pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
1909 resource_size(res));
Dan Carpenter9c361b12015-04-09 12:03:31 +03001910 if (!pdma->csr_efuse) {
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301911 dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
Dan Carpenter9c361b12015-04-09 12:03:31 +03001912 return -ENOMEM;
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301913 }
1914
1915 /* Get DMA error interrupt */
1916 irq = platform_get_irq(pdev, 0);
1917 if (irq <= 0) {
1918 dev_err(&pdev->dev, "Failed to get Error IRQ\n");
1919 return -ENXIO;
1920 }
1921
1922 pdma->err_irq = irq;
1923
1924 /* Get DMA Rx ring descriptor interrupts for all DMA channels */
1925 for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
1926 irq = platform_get_irq(pdev, i);
1927 if (irq <= 0) {
1928 dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
1929 return -ENXIO;
1930 }
1931
1932 pdma->chan[i - 1].rx_irq = irq;
1933 }
1934
1935 return 0;
1936}
1937
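/*
 * Probe sequence: map resources, enable the (optional) clock, bring the
 * RAM blocks out of shutdown, set a 42-bit DMA mask, initialize channels
 * and rings, request IRQs, enable the engine, and finally register the
 * channels with the async DMA framework.
 */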
1938static int xgene_dma_probe(struct platform_device *pdev)
1939{
1940 struct xgene_dma *pdma;
1941 int ret, i;
1942
1943 pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
1944 if (!pdma)
1945 return -ENOMEM;
1946
1947 pdma->dev = &pdev->dev;
1948 platform_set_drvdata(pdev, pdma);
1949
1950 ret = xgene_dma_get_resources(pdev, pdma);
1951 if (ret)
1952 return ret;
1953
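	/* The clock comes from DT; on ACPI systems it is presumably managed
	 * by firmware, so a missing clock is fatal only when there is no
	 * ACPI companion device.
	 */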
1954 pdma->clk = devm_clk_get(&pdev->dev, NULL);
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +05301955 if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301956 dev_err(&pdev->dev, "Failed to get clk\n");
1957 return PTR_ERR(pdma->clk);
1958 }
1959
1960 /* Enable clk before accessing registers */
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +05301961 if (!IS_ERR(pdma->clk)) {
1962 ret = clk_prepare_enable(pdma->clk);
1963 if (ret) {
1964 dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
1965 return ret;
1966 }
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05301967 }
1968
1969 /* Remove DMA RAM out of shutdown */
1970 ret = xgene_dma_init_mem(pdma);
1971 if (ret)
1972 goto err_clk_enable;
1973
1974 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
1975 if (ret) {
1976 dev_err(&pdev->dev, "No usable DMA configuration\n");
1977 goto err_dma_mask;
1978 }
1979
1980 /* Initialize DMA channels software state */
1981 xgene_dma_init_channels(pdma);
1982
 1983	/* Configure DMA rings */
1984 ret = xgene_dma_init_rings(pdma);
1985 if (ret)
1986 goto err_clk_enable;
1987
1988 ret = xgene_dma_request_irqs(pdma);
1989 if (ret)
1990 goto err_request_irq;
1991
1992 /* Configure and enable DMA engine */
1993 xgene_dma_init_hw(pdma);
1994
1995 /* Register DMA device with linux async framework */
1996 ret = xgene_dma_init_async(pdma);
1997 if (ret)
1998 goto err_async_init;
1999
2000 return 0;
2001
2002err_async_init:
2003 xgene_dma_free_irqs(pdma);
2004
2005err_request_irq:
2006 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
2007 xgene_dma_delete_chan_rings(&pdma->chan[i]);
2008
2009err_dma_mask:
2010err_clk_enable:
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +05302011 if (!IS_ERR(pdma->clk))
2012 clk_disable_unprepare(pdma->clk);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05302013
2014 return ret;
2015}
2016
2017static int xgene_dma_remove(struct platform_device *pdev)
2018{
2019 struct xgene_dma *pdma = platform_get_drvdata(pdev);
2020 struct xgene_dma_chan *chan;
2021 int i;
2022
2023 xgene_dma_async_unregister(pdma);
2024
2025 /* Mask interrupts and disable DMA engine */
2026 xgene_dma_mask_interrupts(pdma);
2027 xgene_dma_disable(pdma);
2028 xgene_dma_free_irqs(pdma);
2029
2030 for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
2031 chan = &pdma->chan[i];
2032 tasklet_kill(&chan->tasklet);
2033 xgene_dma_delete_chan_rings(chan);
2034 }
2035
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +05302036 if (!IS_ERR(pdma->clk))
2037 clk_disable_unprepare(pdma->clk);
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05302038
2039 return 0;
2040}
2041
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +05302042#ifdef CONFIG_ACPI
2043static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
2044 {"APMC0D43", 0},
2045 {},
2046};
2047MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
2048#endif
2049
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05302050static const struct of_device_id xgene_dma_of_match_ptr[] = {
2051 {.compatible = "apm,xgene-storm-dma",},
2052 {},
2053};
2054MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
2055
2056static struct platform_driver xgene_dma_driver = {
2057 .probe = xgene_dma_probe,
2058 .remove = xgene_dma_remove,
2059 .driver = {
2060 .name = "X-Gene-DMA",
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05302061 .of_match_table = xgene_dma_of_match_ptr,
Rameshwar Prasad Sahu89079492015-07-21 18:44:39 +05302062 .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
Rameshwar Prasad Sahu9f2fd0d2015-03-18 19:17:34 +05302063 },
2064};
2065
2066module_platform_driver(xgene_dma_driver);
2067
2068MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
2069MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
2070MODULE_AUTHOR("Loc Ho <lho@apm.com>");
2071MODULE_LICENSE("GPL");
2072MODULE_VERSION("1.0");