/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

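/*
 * Initialize a hardware descriptor for a new transfer: bit 31 of the
 * status word hands ownership to the XOR engine, the chain pointer is
 * cleared, and bit 31 of the command word is set (the end-of-descriptor
 * interrupt enable on this hardware).
 */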
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

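/*
 * Per-channel register helpers. The XOR_* macros resolve to offsets from
 * this channel's mmr_base; the relaxed MMIO accessors are used since no
 * explicit barriers are required around these accesses.
 */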
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = readl_relaxed(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel_relaxed(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on the new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

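/*
 * Complete one finished descriptor: invoke the client callback, unmap the
 * DMA buffers unless the submitter asked to skip unmapping, and start any
 * dependent transactions. Returns the completed transaction's cookie.
 */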
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

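/*
 * Walk the submitted chain and complete every descriptor the hardware has
 * finished with, stopping at the descriptor currently being processed. If
 * the channel went idle while work is still queued, restart it on the new
 * chain head. Callers must hold mv_chan->lock.
 */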
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

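/*
 * Find a run of num_slots free descriptor slots, searching forward from
 * the last allocation and wrapping to the start of the pool once. Returns
 * the tail slot of the allocation (its group_head points back at the
 * first slot), or NULL when the pool is exhausted.
 */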
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
			struct mv_xor_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
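/*
 * Submit a prepared descriptor: assign its cookie and splice it onto the
 * software chain under the channel lock. If the hardware chain was empty,
 * or the engine already consumed the old tail descriptor, (re)start the
 * engine on the new descriptors; otherwise just link them after the
 * current hardware tail.
 */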
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

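/*
 * Prepare a single-descriptor memcpy. The engine cannot handle transfers
 * shorter than MV_XOR_MIN_BYTE_COUNT or longer than MV_XOR_MAX_BYTE_COUNT,
 * so undersized requests are rejected and oversized ones are a bug.
 */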
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

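/*
 * Prepare a single-descriptor XOR over src_cnt source buffers. Each source
 * programmed via mv_desc_set_src_addr() also sets the matching
 * source-enable bit in the descriptor command word. Same size limits as
 * memcpy.
 */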
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

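/*
 * Handle error interrupt bits. The cause flagged by bit 4 is not treated
 * as fatal; any other flagged error dumps the channel registers and
 * triggers a BUG().
 */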
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

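/*
 * Bring up one XOR channel: allocate the descriptor pool, request the IRQ,
 * run the memcpy and xor self-tests for the advertised capabilities, and
 * register the channel with the dmaengine core.
 */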
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

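/*
 * Program the engine's MBUS address decoding windows so that every DRAM
 * chip-select described by the mbus_dram_target_info is reachable, leaving
 * all unused windows cleared and disabled.
 */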
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");