/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

/* Populate the descriptor */
static void mv_xor_config_sg_ll_desc(struct mv_xor_desc_slot *desc,
				     dma_addr_t dma_src, dma_addr_t dma_dst,
				     u32 len, struct mv_xor_desc_slot *prev)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Configure for XOR with only one src address -> MEMCPY */
	hw_desc->desc_command = XOR_DESC_OPERATION_XOR | (0x1 << 0);
	hw_desc->phy_dest_addr = dma_dst;
	hw_desc->phy_src_addr[0] = dma_src;
	hw_desc->byte_count = len;

	if (prev) {
		struct mv_xor_desc *hw_prev = prev->hw_desc;

		hw_prev->phy_next_desc = desc->async_tx.phys;
	}
}

static void mv_xor_desc_config_eod(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	/* Enable end-of-descriptor interrupt */
	hw_desc->desc_command |= XOR_DESC_EOD_INT_EN;
}

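/*
 * Set the operation (XOR or MEMCPY) in the descriptor itself; only
 * used when the channel runs in XOR_MODE_IN_DESC.
 */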
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

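/*
 * Program the operation mode into the channel configuration register
 * and set the descriptor swap bits to match the CPU endianness.
 */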
static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

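/* Return 1 if the channel status field reports the engine as active */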
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc.
 * Caller must hold &mv_chan->lock while calling this function.
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

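/*
 * Unmap and run the client callback for one completed descriptor, then
 * kick off any dependent operations; returns the descriptor's cookie.
 */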
static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
						      &mv_chan->free_slots);
			}
		}
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

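/*
 * Grab the first slot from the free list, pre-ack it and mark its
 * cookie as in-flight; schedule the cleanup tasklet when no slot is
 * available.
 */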
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
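/*
 * mv_xor_tx_submit - append the descriptor to the software chain and,
 * when the engine is idle or has already consumed the old chain tail,
 * (re)start it on the new chain.
 */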
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if the source or destination is a PCIe/IO address (non-SDRAM)
 * and add a new MBus window if necessary. Use a cache for these checks
 * so that the MMIO-mapped registers don't have to be accessed every
 * time, to speed up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check, if the requested area
	 * is already mapped. If this is the case, nothing needs to be done
	 * and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, the addr has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area here.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading either of the two enable registers is enough, as they
	 * are always programmed to identical values.
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

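/*
 * Prepare an XOR transaction: allocate a slot, initialize its hardware
 * descriptor for 'dest' and 'len', and program one source address (and
 * MBus window, if needed) per entry in 'src'.
 */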
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

/**
 * mv_xor_prep_dma_sg - prepare descriptors for a memory sg transaction
 * @chan: DMA channel
 * @dst_sg: Destination scatter list
 * @dst_sg_len: Number of entries in destination scatter list
 * @src_sg: Source scatter list
 * @src_sg_len: Number of entries in source scatter list
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_sg(struct dma_chan *chan, struct scatterlist *dst_sg,
		   unsigned int dst_sg_len, struct scatterlist *src_sg,
		   unsigned int src_sg_len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *new;
	struct mv_xor_desc_slot *first = NULL;
	struct mv_xor_desc_slot *prev = NULL;
	size_t len, dst_avail, src_avail;
	dma_addr_t dma_dst, dma_src;
	int desc_cnt = 0;
	int ret;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dst_sg_len: %d src_sg_len: %d flags: %ld\n",
		__func__, dst_sg_len, src_sg_len, flags);

	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		desc_cnt++;
		new = mv_chan_alloc_slot(mv_chan);
		if (!new) {
			dev_err(mv_chan_to_devp(mv_chan),
				"Out of descriptors (desc_cnt=%d)!\n",
				desc_cnt);
			goto err;
		}

		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, MV_XOR_MAX_BYTE_COUNT);
		if (len == 0)
			goto fetch;

		if (len < MV_XOR_MIN_BYTE_COUNT) {
			dev_err(mv_chan_to_devp(mv_chan),
				"Transfer size of %zu too small!\n", len);
			goto err;
		}

		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
			dst_avail;
		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
			src_avail;

		/* Check if a new window needs to get added for 'dst' */
		ret = mv_xor_add_io_win(mv_chan, dma_dst);
		if (ret)
			goto err;

		/* Check if a new window needs to get added for 'src' */
		ret = mv_xor_add_io_win(mv_chan, dma_src);
		if (ret)
			goto err;

		/* Populate the descriptor */
		mv_xor_config_sg_ll_desc(new, dma_src, dma_dst, len, prev);
		prev = new;
		dst_avail -= len;
		src_avail -= len;

		if (!first)
			first = new;
		else
			list_move_tail(&new->node, &first->sg_tx_list);

fetch:
		/* Fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			if (dst_sg_len == 0)
				break;

			/* Fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_sg_len--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* Fetch the next src scatterlist entry */
		if (src_avail == 0) {
			if (src_sg_len == 0)
				break;

			/* Fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_sg_len--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	/* Set the EOD flag in the last descriptor */
	mv_xor_desc_config_eod(new);
	first->async_tx.flags = flags;

	return &first->async_tx;

err:
	/* Cleanup: Move all descriptors back into the free list */
	spin_lock_bh(&mv_chan->lock);
	mv_desc_clean_slot(first, mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return NULL;
}

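/*
 * Move every descriptor back to the free list and free all allocated
 * slots; complains when descriptors are still in use.
 */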
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

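/* Dump the main channel registers, used when reporting errors */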
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

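/*
 * Top-half interrupt handler: report errors, defer descriptor cleanup
 * to the tasklet and acknowledge the end-of-chain/descriptor causes.
 */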
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

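/*
 * Activation is batched: the engine is only (re)activated once at
 * least MV_XOR_THRESHOLD descriptors are pending.
 */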
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

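/*
 * Perform an XOR transaction over MV_XOR_NUM_SRC_TEST source pages and
 * verify the destination against the expected bit pattern.
 */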
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

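/*
 * Tear down one channel: unregister it from the dmaengine core, free
 * the descriptor pool and dummy buffers and release its IRQ.
 */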
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

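/*
 * Set up one XOR channel: map the dummy buffers, allocate the
 * descriptor pool, register the dmaengine operations, hook up the IRQ
 * and run the self-tests before registering the channel.
 */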
Thomas Petazzoni1ef48a22012-11-15 15:17:05 +01001197static struct mv_xor_chan *
Thomas Petazzoni297eedb2012-11-15 15:29:53 +01001198mv_xor_channel_add(struct mv_xor_device *xordev,
Thomas Petazzonia6b4a9d2012-10-29 16:45:46 +01001199 struct platform_device *pdev,
Gregory CLEMENTdd130c62016-04-29 09:49:06 +02001200 int idx, dma_cap_mask_t cap_mask, int irq)
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001201{
1202 int ret = 0;
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001203 struct mv_xor_chan *mv_chan;
1204 struct dma_device *dma_dev;
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001205
Thomas Petazzoni1ef48a22012-11-15 15:17:05 +01001206 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
Sachin Kamata5776592013-09-02 13:54:20 +05301207 if (!mv_chan)
1208 return ERR_PTR(-ENOMEM);
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001209
Thomas Petazzoni9aedbdb2012-11-15 15:36:37 +01001210 mv_chan->idx = idx;
Thomas Petazzoni88eb92c2012-11-15 16:11:18 +01001211 mv_chan->irq = irq;
Gregory CLEMENTdd130c62016-04-29 09:49:06 +02001212 if (xordev->xor_type == XOR_ORION)
1213 mv_chan->op_in_desc = XOR_MODE_IN_REG;
1214 else
1215 mv_chan->op_in_desc = XOR_MODE_IN_DESC;
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001216
Thomas Petazzoni1ef48a22012-11-15 15:17:05 +01001217 dma_dev = &mv_chan->dmadev;
Stefan Roese77ff7a72016-09-15 07:37:31 +02001218 mv_chan->xordev = xordev;
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001219
Lior Amsalem22843542014-08-27 10:52:55 -03001220 /*
1221 * These source and destination dummy buffers are used to implement
1222 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
1223 * Hence, we only need to map the buffers at initialization-time.
1224 */
1225 mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
1226 mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
1227 mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
1228 mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
1229
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001230 /* allocate coherent memory for hardware descriptors
1231 * note: writecombine gives slightly better performance, but
1232 * requires that we explicitly flush the writes
1233 */
Thomas Petazzoni1ef48a22012-11-15 15:17:05 +01001234 mv_chan->dma_desc_pool_virt =
Luis R. Rodriguezf6e45662016-01-22 18:34:22 -08001235 dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
1236 GFP_KERNEL);
Thomas Petazzoni1ef48a22012-11-15 15:17:05 +01001237 if (!mv_chan->dma_desc_pool_virt)
Thomas Petazzonia6b4a9d2012-10-29 16:45:46 +01001238 return ERR_PTR(-ENOMEM);
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001239
1240 /* discover transaction capabilites from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_SG, dma_dev->cap_mask))
		dma_dev->device_prep_dma_sg = mv_xor_prep_dma_sg;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

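	/*
	 * In XOR_MODE_IN_DESC each hardware descriptor carries its own
	 * operation type, so memcpy, XOR and interrupt descriptors can be
	 * mixed on a single chain; in XOR_MODE_IN_REG the operation is
	 * programmed once, here, in the channel configuration register.
	 */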
	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "sg " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}
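
/*
 * Example client usage (illustrative only, not part of this driver):
 * once a channel is registered above, kernel users go through the
 * generic dmaengine API rather than calling this driver directly. A
 * minimal memcpy offload could look like the following, assuming
 * dst_dma, src_dma and len describe a DMA-mapped destination, source
 * and length:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma,
 *					       len, DMA_PREP_INTERRUPT);
 *		if (tx) {
 *			dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *			// ... wait for completion ...
 *		}
 *		dma_release_channel(chan);
 *	}
 */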

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

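	/*
	 * Program one address-decoding window per DRAM chip-select below.
	 * The WINDOW_BASE register packs the window base address in bits
	 * [31:16], the MBus attribute in [15:8] and the DRAM target ID in
	 * the low bits; win_enable collects one enable bit per window plus
	 * a 2-bit access-control field per window at [16 + 2*i] (0x3 here,
	 * i.e. full read/write access).
	 */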
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		/* Fill the caching variables for later use */
		xordev->win_start[i] = cs->base;
		xordev->win_end[i] = cs->base + cs->size - 1;

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For Armada 3700, open a default 4GB MBus window; the
	 * DRAM-related configuration is done at the AXIS level.
	 */
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;
	win_enable |= 3 << 16;

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
		return 0;
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we use before
	 * setting up. In non-dt case it can only be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want more than one channel per CPU, so that async_tx
	 * performs well. We therefore limit the number of engines and
	 * channels to respect this constraint, and prefer channels from
	 * separate engines when possible. The dual-CPU Armada 3700 SoC
	 * has a single XOR engine, so both of its channels may be used.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));
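
	/*
	 * Worked example (illustrative, assuming MV_XOR_MAX_CHANNELS is 2,
	 * i.e. two channels per engine): on a quad-core SoC with two XOR
	 * engines, num_present_cpus() = 4, so max_engines = 4 and
	 * max_channels = min(2, DIV_ROUND_UP(4, 2)) = 2. Each of the two
	 * probed engines then registers two channels, giving four channels
	 * in total, one per CPU.
	 */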

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_SG, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

builtin_platform_driver(mv_xor_driver);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/