/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}
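
/*
 * A note on the two (1 << 31) magic values above: per the hardware
 * descriptor layout this driver uses, bit 31 of the status word marks
 * the descriptor as owned by the DMA engine, and bit 31 of the command
 * word enables the end-of-descriptor interrupt. Later revisions of this
 * driver give these bits names (along the lines of XOR_DESC_DMA_OWNED
 * and XOR_DESC_EOD_INT_EN); treat this as a hedged reading of the
 * datasheet, not as definitions present in this version of mv_xor.h.
 */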

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}
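
/*
 * The interrupt cause/mask registers are shared by the two channels of
 * an XOR engine, 16 bits per channel, which is why the helpers above
 * shift by chan->idx * 16. Bits 4..9 of a channel's field appear to be
 * the error conditions (address decode, access protect, parity and
 * similar faults); the engine datasheet is the authority on each bit.
 */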

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}
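
/*
 * Bits [5:4] of the activation register encode the channel state; a
 * value of 1 is "active", so any other state (idle, paused) reads back
 * as not busy here.
 */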

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}
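
/*
 * Starting a chain is two steps: point the engine's next-descriptor
 * register at the head of the software chain, then account the pending
 * descriptors and let mv_xor_issue_pending() decide when to actually
 * activate the channel (see MV_XOR_THRESHOLD there).
 */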

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}
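
/*
 * In short: the walk above reaps every descriptor older than the one
 * the engine is currently executing, and if the engine went idle while
 * work is still queued, the chain is restarted from the oldest
 * remaining descriptor.
 */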

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}
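
/*
 * The allocator above is a first-fit search over a fixed slot pool: it
 * looks for num_slots contiguous free slots starting just after the
 * previous allocation, retries once from the head of the list, and on
 * failure schedules the cleanup tasklet in the hope that completed
 * slots become available for the caller's next attempt.
 */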

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}
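
/*
 * For orientation, a minimal sketch of how a dmaengine client ends up
 * in mv_xor_tx_submit(). This is the generic dmaengine API, not an
 * interface specific to this driver, and the buffer variables are
 * hypothetical:
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma, src_dma,
 *						  len, DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);	(for this driver: mv_xor_tx_submit)
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *	       != DMA_COMPLETE)
 *		cpu_relax();
 */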

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %pad src %pad len: %u flags: %ld\n",
		__func__, &dest, &src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
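
/*
 * Transfers shorter than MV_XOR_MIN_BYTE_COUNT are declined (returning
 * NULL lets the async_tx layer fall back to a CPU copy, which is
 * cheaper at that size), while MV_XOR_MAX_BYTE_COUNT is the most one
 * hardware descriptor can express, hence the BUG_ON above.
 */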

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}
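
/*
 * mv_xor_issue_pending() batches activation: the channel is kicked only
 * once at least MV_XOR_THRESHOLD descriptors are pending. The threshold
 * is defined in mv_xor.h (historically 1, making activation effectively
 * immediate), so this is a tuning knob rather than a correctness point.
 */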

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[0] = src_dma;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->addr[1] = dest_dma;

	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
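
/*
 * Each window programmed above maps one DRAM chip-select into the XOR
 * engine's address space. win_enable accumulates the per-window enable
 * bit plus what appear to be the two per-window access-control bits at
 * 16 + 2*i (full read/write access), and is mirrored into the enable
 * registers of both channels of the engine.
 */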

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif
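
/*
 * For reference, a sketch of a device-tree node this driver binds to
 * (see Documentation/devicetree/bindings/dma/mv-xor.txt for the
 * authoritative binding; the address and interrupt numbers below are
 * illustrative only):
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *		status = "okay";
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *	};
 */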

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");