/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->device->common.dev)

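/*
 * The helpers below poke the hardware descriptor embedded in each software
 * slot.  mv_desc_init() clears the next-descriptor link and sets the
 * status/command bits the engine expects on a fresh descriptor (the exact
 * bit layout is defined in mv_xor.h).
 */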
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d.\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

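/*
 * Bits [5:4] of the activation register report the channel state;
 * mv_chan_is_busy() below treats only state 1 as "busy".
 */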
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we program the engine registers
		 * directly; no descriptors are used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

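/*
 * Run the completion actions for one finished descriptor: invoke the client
 * callback, unmap the DMA buffers unless the submitter asked to skip the
 * unmap, and kick any dependent transactions.  Returns the last completed
 * cookie so the caller can update the channel's completed_cookie.
 */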
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

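/*
 * Walk the channel's chain from the oldest descriptor, completing everything
 * the engine has already passed.  The walk stops at the descriptor currently
 * loaded in hardware while the channel is busy; if the channel has gone idle
 * with work still queued, the engine is restarted on the remaining chain head.
 */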
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->common.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start the search from the last allocated descriptor; if a
	 * contiguous allocation cannot be found, start searching again
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
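/*
 * mv_xor_tx_submit - assign a cookie and splice the software descriptors onto
 * the channel's chain.  When the chain was non-empty, the new descriptors are
 * also linked behind the old tail in hardware, and the engine is only
 * (re)started if it is idle and already stopped at that old tail.
 */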
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
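/*
 * Software slots are carved out of the coherent descriptor pool allocated in
 * mv_xor_channel_add(): each slot gets one MV_XOR_SLOT_SIZE chunk of the pool
 * plus the matching bus address for async_tx.phys.
 */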
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in the memcpy descriptor */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan),
		"config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan),
		"activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan),
		"error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

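/*
 * Channel activation is batched: mv_xor_start_new_chain() bumps the pending
 * count, and the activation register is only written once at least
 * MV_XOR_THRESHOLD descriptors are pending.
 */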
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling."
				" index %d, data %x, expected %x\n", i,
				ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_device *device)
{
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&device->pdev->dev, device->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static struct mv_xor_device *
mv_xor_channel_add(struct mv_xor_private *msp,
		   struct platform_device *pdev,
		   int hw_id, dma_cap_mask_t cap_mask,
		   size_t pool_size, int irq)
{
	int ret = 0;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return ERR_PTR(-ENOMEM);

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->pool_size = pool_size;
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  adev->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;
	adev->pdev = pdev;
	adev->shared = msp;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->common.device = dma_dev;
	dma_cookie_init(&mv_chan->common);

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_info(&pdev->dev, "Marvell XOR: "
		 "( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return adev;

 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
	return ERR_PTR(ret);
}

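/*
 * Program the XOR engine's address decoding windows from the MBus DRAM
 * layout: all eight windows are cleared first, then one window per DRAM
 * chip select is set up and enabled in both WINDOW_BAR_ENABLE registers.
 */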
static void
mv_xor_conf_mbus_windows(struct mv_xor_private *msp,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_private *msp;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(msp, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	msp->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);

	if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			msp->channels[i] =
				mv_xor_channel_add(msp, pdev, cd->hw_id,
						   cd->cap_mask,
						   cd->pool_size, irq);
			if (IS_ERR(msp->channels[i])) {
				ret = PTR_ERR(msp->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (msp->channels[i])
			mv_xor_channel_remove(msp->channels[i]);

	clk_disable_unprepare(msp->clk);
	clk_put(msp->clk);
	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_private *msp = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (msp->channels[i])
			mv_xor_channel_remove(msp->channels[i]);
	}

	if (!IS_ERR(msp->clk)) {
		clk_disable_unprepare(msp->clk);
		clk_put(msp->clk);
	}

	return 0;
}
1352
Thomas Petazzoni61971652012-10-30 12:05:40 +01001353static struct platform_driver mv_xor_driver = {
1354 .probe = mv_xor_probe,
1355 .remove = mv_xor_remove,
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001356 .driver = {
1357 .owner = THIS_MODULE,
Thomas Petazzoni0dddee72012-10-30 11:59:42 +01001358 .name = MV_XOR_NAME,
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001359 },
1360};
1361
1362
1363static int __init mv_xor_init(void)
1364{
Thomas Petazzoni61971652012-10-30 12:05:40 +01001365 return platform_driver_register(&mv_xor_driver);
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001366}
1367module_init(mv_xor_init);
1368
1369/* it's currently unsafe to unload this module */
1370#if 0
1371static void __exit mv_xor_exit(void)
1372{
Thomas Petazzoni61971652012-10-30 12:05:40 +01001373 platform_driver_unregister(&mv_xor_driver);
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001374 return;
1375}
1376
1377module_exit(mv_xor_exit);
1378#endif
1379
1380MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1381MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1382MODULE_LICENSE("GPL");