/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

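/*
 * Helpers that manipulate the in-memory hardware descriptor.  Per the
 * Marvell XOR descriptor layout, the (1 << 31) magic below appears to
 * be the "owned by the engine" flag in the status word and the
 * end-of-descriptor interrupt enable in the command word.
 */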
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

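/*
 * Every supported operation fits in a single hardware descriptor here,
 * so the slot-count helpers always report one slot regardless of the
 * transfer length.
 */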
static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

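/*
 * Channel register accessors.  The relaxed MMIO variants are
 * sufficient here: these accesses all target the same device and need
 * no ordering against DMA memory, so the full barriers implied by
 * readl()/writel() would only add cost.
 */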
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

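/*
 * Bits 4..9 of a channel's interrupt cause field report error
 * conditions (address decode and access faults and the like); the
 * lower bits signal normal completion events.
 */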
static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}

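/*
 * The low three bits of the channel configuration register select the
 * operation mode.  On big-endian kernels the engine is additionally
 * told to byte-swap descriptors (XOR_DESCRIPTOR_SWAP) so the in-memory
 * descriptor layout can stay little-endian.
 */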
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = readl_relaxed(XOR_ACTIVATION(chan));
	activation |= 0x1;
	writel_relaxed(activation, XOR_ACTIVATION(chan));
}

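/*
 * Bits 4-5 of the activation register hold the channel state; a value
 * of 1 means the channel is currently active.
 */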
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

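/*
 * Complete one finished descriptor: run the client callback, drop the
 * DMA mappings, and kick any dependent async_tx operations.  Returns
 * the descriptor's cookie so the caller can advance the channel's
 * completed_cookie.
 */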
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

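/*
 * Walk the channel's descriptor chain from the oldest entry,
 * completing everything the hardware has finished with and stopping at
 * the descriptor currently loaded into the engine.  If the engine went
 * idle while work is still queued, restart it on the remaining chain.
 * Callers must hold mv_chan->lock; mv_xor_slot_cleanup() below is the
 * locked wrapper.
 */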
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
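/*
 * Clients drive this engine through the generic dmaengine flow: a
 * device_prep_dma_*() call builds a descriptor, tx_submit() queues it
 * and hands back a cookie, device_issue_pending() starts the engine,
 * and the cookie is then polled via device_tx_status().  The
 * self-tests further down follow exactly this sequence.
 */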
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %pad src %pad len: %u flags: %ld\n",
		__func__, &dest, &src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

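/*
 * Hard interrupt handler: fatal errors are reported (and BUG()) right
 * here, while normal completion processing is deferred to the tasklet
 * to keep time spent in hard-IRQ context short.
 */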
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

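/*
 * The channel is only (re)activated once at least MV_XOR_THRESHOLD
 * descriptors are pending, so clients must call this after submitting
 * work for the engine to actually start.
 */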
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[0] = src_dma;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->addr[1] = dest_dma;

	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

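/*
 * Program the engine's address decoding windows so that every DRAM
 * chip-select advertised by the mbus layer is reachable by the XOR
 * units, and disable all remaining windows.
 */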
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");