/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

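/*
 * Low-level descriptor helpers: each software slot
 * (struct mv_xor_desc_slot) wraps one hardware descriptor
 * (struct mv_xor_desc) in the channel's descriptor pool.
 */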
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

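/* Per-channel register accessors, using the relaxed MMIO variants. */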
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}

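/*
 * Select the channel's operation mode (XOR or MEMCPY) in the
 * configuration register, and set descriptor swapping to match the
 * CPU endianness.
 */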
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = readl_relaxed(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel_relaxed(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @mv_chan: XOR channel the slot belongs to
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on the new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

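/*
 * Complete one descriptor: invoke the client's callback, unmap the
 * source and destination buffers unless the submitter asked to skip
 * the unmaps, then run any dependent transactions.
 */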
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

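/*
 * Walk the chain from the oldest descriptor, completing every entry
 * the engine has finished with. If the channel has gone idle while
 * descriptors are still queued, restart it on the remaining chain.
 */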
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

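/*
 * Find a run of num_slots contiguous free descriptor slots. The search
 * resumes after the last allocation and wraps to the head of the pool
 * once; if nothing is found, the cleanup tasklet is scheduled in the
 * hope of reclaiming completed slots.
 */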
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
							  struct mv_xor_desc_slot,
							  slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
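/*
 * Append a prepared transaction to the channel's software chain and,
 * when the descriptor types allow chaining, link it into the hardware
 * chain of the previous tail so the engine picks it up without a
 * restart.
 */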
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

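/*
 * Prepare a one-slot memcpy descriptor. Lengths below
 * MV_XOR_MIN_BYTE_COUNT are rejected with NULL; lengths above
 * MV_XOR_MAX_BYTE_COUNT are a caller bug.
 */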
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

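/*
 * Prepare a one-slot XOR descriptor over up to eight sources
 * (dma_dev->max_xor), XORed by the engine into the destination.
 */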
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

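/*
 * Release a channel's descriptor slots, warning if any descriptors
 * are still on the chain or awaiting an ack.
 */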
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

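/* Dump the channel's registers to aid post-mortem error analysis. */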
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

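/*
 * Any error cause other than the ignorable bit 4 is treated as fatal:
 * dump the channel registers and BUG().
 */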
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

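/*
 * Submissions are batched: the engine is only (re)activated once at
 * least MV_XOR_THRESHOLD descriptors are pending.
 */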
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

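/*
 * Instantiate one XOR channel: allocate its descriptor pool, register
 * the dmaengine callbacks that match the capability mask, hook up the
 * interrupt, and run the self tests before registering the channel
 * with the dmaengine core.
 */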
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

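/*
 * Program the engine's address decoding windows to match the MBUS
 * DRAM layout, so the XOR unit can reach every DRAM chip select.
 */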
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

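/*
 * Probe the shared XOR block: map its two register windows, program
 * the MBUS windows, enable the (optional) clock, then add one channel
 * per DT child node or per platform_data entry.
 */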
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");