/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

	/*
	 * IS_ENABLED() only works for CONFIG_* symbols, so test the
	 * endianness macro with the preprocessor, as
	 * mv_chan_set_mode_to_desc() below does.
	 */
#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_set_mode_to_desc(struct mv_xor_chan *chan)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	op_mode = XOR_OPERATION_MODE_IN_DESC;

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

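/*
 * Illustrative sketch (not part of the driver): how a dmaengine client
 * might exercise the prep routines above.  Channel acquisition, DMA
 * mapping and error handling are elided; "chan", "dest", "srcs",
 * "src_cnt", "len" and "my_callback" are assumptions of this sketch,
 * not names defined in this file.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *					       len, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_callback;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
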
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	mv_chan->op_in_desc = op_in_desc;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode_to_desc(mv_chan);
	else
		mv_chan_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
	{},
};

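/*
 * Illustrative sketch (not part of the driver): a device-tree fragment of
 * the shape this driver's probe routine parses -- two register windows
 * (base and high base) and one child node per channel, each with its own
 * interrupt.  The addresses and interrupt numbers below are made up.
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *		clocks = <&coreclk 0>;
 *
 *		channel0 {
 *			interrupts = <51>;
 *		};
 *		channel1 {
 *			interrupts = <52>;
 *		};
 *	};
 */
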
static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;
	int op_in_desc;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible.
	 */
	max_engines = num_present_cpus();
	max_channels = min_t(unsigned int,
			     MV_XOR_MAX_CHANNELS,
			     DIV_ROUND_UP(num_present_cpus(), 2));

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;
			op_in_desc = (int)of_id->data;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq, op_in_desc);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq,
						  XOR_MODE_IN_REG);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

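/*
 * Illustrative sketch (not part of the driver): the legacy (non-DT) path
 * above expects board code to register a platform device whose
 * platform_data is a struct mv_xor_platform_data, roughly as below.  The
 * array name and capability choices are assumptions of this sketch.
 *
 *	static struct mv_xor_channel_data mv_xor_channels[MV_XOR_MAX_CHANNELS];
 *	static struct mv_xor_platform_data mv_xor_pdata = {
 *		.channels = mv_xor_channels,
 *	};
 *
 * with dma_cap_set(DMA_XOR, mv_xor_channels[i].cap_mask) and friends
 * filled in before the device is registered.
 */
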
static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/