/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

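/*
 * Each software descriptor slot wraps one hardware descriptor carved out
 * of the channel's coherent pool; mv_desc_init() hands the descriptor to
 * the engine by marking it DMA-owned.
 */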
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

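	/*
	 * The shared cause register gives each channel a 16-bit field and
	 * is cleared by writing zeroes: keep every other bit set so that
	 * only this channel's end-of-descriptor/end-of-chain/stopped bits
	 * are cleared.
	 */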
	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

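	/* The operation mode lives in the low three bits of XOR_CONFIG */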
	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

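	/*
	 * The channel status field is in bits [5:4] of the activation
	 * register; a value of 1 means the channel is busy.
	 */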
	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

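	/*
	 * If the engine has stopped but descriptors remain on the chain,
	 * decide where to restart: from the list head if the current
	 * descriptor was reaped, from its successor if not, or defer to
	 * the tasklet when only unfinished work is left.
	 */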
	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if the source or destination is a PCIe/IO address (non-SDRAM) and
 * add a new MBus window if necessary. A cache of the windows is kept so
 * that the MMIO-mapped registers don't have to be accessed for every
 * check, which speeds up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If that is the case, nothing needs to be
	 * done and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, addr has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading either of the two enable registers is enough, as they
	 * are always programmed to identical values.
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

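/*
 * A minimal sketch (not part of this driver) of how a dmaengine client
 * typically drives this prep callback, assuming 'chan' was obtained via
 * dma_request_channel() and the addresses are already DMA-mapped:
 *
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *					       len, DMA_PREP_INTERRUPT);
 *	tx->callback = xor_done;	// hypothetical completion hook
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */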
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

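	/*
	 * Not complete yet: reap finished descriptors so that
	 * completed_cookie is up to date, then query again.
	 */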
	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

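	/* Defer the actual cleanup to the tasklet, then ack this channel's
	 * end-of-descriptor causes
	 */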
	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;
	dma_dev->dev = &pdev->dev;
	mv_chan->xordev = xordev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

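		/*
		 * Each window base register packs base[31:16], the MBus
		 * attribute and the target ID; the size register holds
		 * (size - 1) with the low 16 bits ignored.
		 */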
1170 writel((cs->base & 0xffff0000) |
1171 (cs->mbus_attr << 8) |
1172 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1173 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1174
Stefan Roese77ff7a72016-09-15 07:37:31 +02001175 /* Fill the caching variables for later use */
1176 xordev->win_start[i] = cs->base;
1177 xordev->win_end[i] = cs->base + cs->size - 1;
1178
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001179 win_enable |= (1 << i);
1180 win_enable |= 3 << (16 + (2 * i));
1181 }
1182
1183 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1184 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
Thomas Petazzonic4b4b732012-11-22 18:16:37 +01001185 writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1186 writel(0, base + WINDOW_OVERRIDE_CTRL(1));
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001187}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For the Armada 3700, open a default 4GB MBus window. The
	 * DRAM-related configuration is done at the AXIS level.
	 */
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;
	win_enable |= 3 << 16;

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

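/*
 * Restore each channel's configuration and interrupt-mask registers
 * from the values saved by mv_xor_suspend(), then reprogram the MBus
 * windows, whose contents are presumably not preserved while the
 * engine is powered down.
 */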
static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
		return 0;
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};

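/*
 * A minimal sketch of a device-tree node matched by the table above,
 * assuming the usual Marvell XOR binding (one child node per channel,
 * each carrying its interrupt); the addresses and interrupt numbers
 * below are placeholders, not values from a real board:
 *
 *	xor@60800 {
 *		compatible = "marvell,armada-380-xor";
 *		reg = <0x60800 0x100>, <0x60a00 0x100>;
 *
 *		xor00 {
 *			interrupts = <22>;
 *		};
 *		xor01 {
 *			interrupts = <23>;
 *		};
 *	};
 *
 * mv_xor_probe() below maps the two "reg" ranges as xor_base and
 * xor_high_base, and irq_of_parse_and_map() picks up one interrupt
 * per child node.
 */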
static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we are dealing
	 * with before setting it up. In the non-DT case it can only
	 * be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/*
	 * Not all platforms can gate the clock, so it is not an
	 * error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * For async_tx to perform well, we should not have more than
	 * one channel per CPU, so limit the number of engines and
	 * channels accordingly, preferring channels from separate
	 * engines when possible. The dual-CPU Armada 3700 SoC has a
	 * single XOR engine, so there we allow using both of that
	 * engine's channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));
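	/*
	 * Worked example, assuming MV_XOR_MAX_CHANNELS is 2 as defined
	 * in mv_xor.h: on a 4-CPU SoC other than the Armada 3700,
	 * max_engines = 4 and max_channels = min(2, DIV_ROUND_UP(4, 2))
	 * = 2, i.e. at most two channels are taken from each probed
	 * engine, spreading the four per-CPU channels across engines.
	 */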

	if (mv_xor_engine_count >= max_engines)
		return 0;

	/*
	 * Count this engine against the limit; nothing else in this
	 * file increments the counter, so without this the check
	 * above could never trigger.
	 */
	mv_xor_engine_count++;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

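/*
 * Unwind any channels that were successfully set up before the
 * failure, disposing of their IRQ mappings in the DT case.
 */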
err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

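/*
 * The driver is built-in only: it is registered via device_initcall()
 * rather than module_init(), so the module macros below are kept
 * inside a comment, apparently for reference only.
 */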
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/