/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

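/*
 * Low-level helpers: the mv_desc_* functions fill in the in-memory
 * hardware descriptor of a software slot, while the mv_chan_* functions
 * access the per-channel XOR engine registers.
 */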
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

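/*
 * Select the operation mode (XOR, memcpy or memset) by programming the
 * low bits of the channel's XOR_CONFIG register.
 */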
static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d.\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

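/*
 * Complete one descriptor: invoke the client callback, unmap the DMA
 * buffers unless the submitter asked to skip the unmap, and kick off any
 * dependent transactions.
 */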
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

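/*
 * Walk the chain from the oldest descriptor, completing everything the
 * hardware has finished, and restart the engine on the remaining chain
 * if it went idle. Caller must hold mv_chan->lock.
 */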
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

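/*
 * Allocate num_slots contiguous software descriptors, searching from the
 * last allocated slot. On failure the cleanup tasklet is scheduled in the
 * hope of freeing slots for a later retry.
 */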
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start the search from the last allocated descriptor;
	 * if a contiguous allocation cannot be found, start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
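/*
 * dmaengine ->tx_submit hook: assign a cookie and splice the descriptors
 * onto the channel's software chain, appending to the hardware chain when
 * possible and starting a new hardware chain otherwise.
 */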
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * if the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = mv_chan->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
			       " %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

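/*
 * Prepare a single-descriptor memcpy transaction; the descriptor is only
 * handed to the hardware once the client submits it through tx_submit.
 */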
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

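/*
 * Prepare an XOR transaction: a single hardware descriptor carries the
 * destination and up to eight source addresses, with one enable bit per
 * source in the descriptor command word.
 */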
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan),
		"config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan),
		"activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan),
		"error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

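/*
 * Issue a small DMA copy and compare the buffers before the channel is
 * registered, giving the hardware about a millisecond to complete.
 */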
static int __devinit mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling."
				" index %d, data %x, expected %x\n", i,
				ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, mv_chan->pool_size,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	return 0;
}

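/*
 * Set up one XOR channel: allocate the descriptor pool, fill in the
 * dmaengine capabilities, request the interrupt and run the self-tests
 * before making the channel visible via dma_async_device_register().
 */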
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int hw_id, dma_cap_mask_t cap_mask,
		   size_t pool_size, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = hw_id;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->pool_size = pool_size;
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, mv_chan->pool_size,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

Saeed Bisharaff7b0472008-07-08 11:58:36 -07001119 /* discover transaction capabilites from the platform data */
	dma_dev->cap_mask = cap_mask;
	mv_chan->shared = xordev;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_info(&pdev->dev, "Marvell XOR: "
		 "( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

 err_free_dma:
	dma_free_coherent(&pdev->dev, pool_size,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

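/*
 * Program the engine's address decoding windows so that every DRAM
 * chip-select reported by the mbus layer is reachable by the XOR unit.
 */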
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

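/*
 * Probe: map the two register ranges, program the mbus windows, enable
 * the optional gate clock and instantiate one channel per platform-data
 * entry.
 */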
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, cd->hw_id,
						   cd->cap_mask,
						   cd->pool_size, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}
	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");