/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

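/* Initialize a hardware descriptor: mark it owned by the DMA engine,
 * clear the next-descriptor link and fill in the destination address
 * and byte count.
 */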
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

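/* In-descriptor mode: encode the XOR/MEMCPY opcode into the descriptor
 * command field instead of the channel configuration register.
 */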
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx))
			list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
	else
		list_move_tail(&desc->node, &mv_chan->free_slots);

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

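/* Tasklet bottom half: reclaims completed descriptors outside of
 * interrupt context.
 */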
static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

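/* Grab a descriptor from the free list and pre-ack it; on failure,
 * schedule the cleanup tasklet in the hope of freeing up some slots.
 */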
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;

	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
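/* Assign a cookie and append the descriptor to the software chain.
 * The hardware chain is only restarted if it was empty, or if the
 * engine is idle and has already consumed the previous chain tail.
 */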
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

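/* Prepare an XOR descriptor with one destination and up to eight
 * sources; MEMCPY and DMA_INTERRUPT below are built on top of this
 * as single-source XOR operations.
 */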
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

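/* Return every descriptor slot to the free list and release it;
 * complains if descriptors are still in flight.
 */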
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

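/* Dump the channel configuration and error registers for diagnostics */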
static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

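/* Per-channel interrupt handler: report errors here, defer descriptor
 * cleanup to the tasklet, then acknowledge the end-of-chain causes.
 */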
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

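/* Kick the hardware once at least MV_XOR_THRESHOLD descriptors are pending */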
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       (size_t)src & ~PAGE_MASK, PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				(size_t)dest & ~PAGE_MASK, PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

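/* Set up a single XOR channel: map the dummy buffers, allocate the
 * descriptor pool, register the dmaengine callbacks, request the IRQ
 * and run the self-tests before registering the channel.
 */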
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

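/* Program the engine's MBus address decoding windows so that it can
 * reach each DRAM chip select described by the mbus info.
 */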
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For Armada3700, open a default 4GB Mbus window. The DRAM-related
	 * configuration is done at the AXIS level.
	 */
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;
	win_enable |= 3 << 16;

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

Thomas Petazzoni8b648432015-12-22 11:43:29 +01001126/*
1127 * Since this XOR driver is basically used only for RAID5, we don't
1128 * need to care about synchronizing ->suspend with DMA activity,
1129 * because the DMA engine will naturally be quiet due to the block
1130 * devices being suspended.
1131 */
1132static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
1133{
1134 struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1135 int i;
1136
1137 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1138 struct mv_xor_chan *mv_chan = xordev->channels[i];
1139
1140 if (!mv_chan)
1141 continue;
1142
1143 mv_chan->saved_config_reg =
1144 readl_relaxed(XOR_CONFIG(mv_chan));
1145 mv_chan->saved_int_mask_reg =
1146 readl_relaxed(XOR_INTR_MASK(mv_chan));
1147 }
1148
1149 return 0;
1150}
1151
1152static int mv_xor_resume(struct platform_device *dev)
1153{
1154 struct mv_xor_device *xordev = platform_get_drvdata(dev);
1155 const struct mbus_dram_target_info *dram;
1156 int i;
1157
1158 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1159 struct mv_xor_chan *mv_chan = xordev->channels[i];
1160
1161 if (!mv_chan)
1162 continue;
1163
1164 writel_relaxed(mv_chan->saved_config_reg,
1165 XOR_CONFIG(mv_chan));
1166 writel_relaxed(mv_chan->saved_int_mask_reg,
1167 XOR_INTR_MASK(mv_chan));
1168 }
1169
Marcin Wojtasac5f0f32016-04-29 09:49:07 +02001170 if (xordev->xor_type == XOR_ARMADA_37XX) {
1171 mv_xor_conf_mbus_windows_a3700(xordev);
1172 return 0;
1173 }
1174
Thomas Petazzoni8b648432015-12-22 11:43:29 +01001175 dram = mv_mbus_dram_info();
1176 if (dram)
1177 mv_xor_conf_mbus_windows(xordev, dram);
1178
1179 return 0;
1180}
1181
Lior Amsalem6f166312015-05-26 15:07:34 +02001182static const struct of_device_id mv_xor_dt_ids[] = {
Gregory CLEMENTdd130c62016-04-29 09:49:06 +02001183 { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
1184 { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
Marcin Wojtasac5f0f32016-04-29 09:49:07 +02001185 { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
Lior Amsalem6f166312015-05-26 15:07:34 +02001186 {},
1187};
Lior Amsalem6f166312015-05-26 15:07:34 +02001188
Thomas Petazzoni77757292015-07-08 16:28:19 +02001189static unsigned int mv_xor_engine_count;
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001190
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we are using before
	 * setting it up. In the non-DT case it can only be the legacy
	 * Orion one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not an error if
	 * the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take this constraint
	 * into account. Note that we also want to use channels from
	 * separate engines when possible. On the dual-CPU Armada 3700
	 * SoC, which has a single XOR engine, allow using both of its
	 * channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));
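	/* For example, with four present CPUs the non-3700 path yields
	 * min(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(4, 2)) = 2 channels per
	 * engine; with two present CPUs it yields a single channel.
	 */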

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

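			/* mv_xor_channel_add() (defined earlier in this
			 * file) allocates the channel's descriptor pool
			 * and registers it with the dmaengine core.
			 */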
			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
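	/* Tear down any channels that were registered before the failure,
	 * then release the clock.
	 */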
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
device_initcall(mv_xor_init);

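/*
 * Note: the driver is registered via device_initcall() and so is
 * built-in only; the MODULE_* tags below appear to be kept purely as
 * documentation.
 */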
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/