1/*
2 * offload engine driver for the Marvell XOR engine
3 * Copyright (C) 2007, 2008, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/dma-mapping.h>
24#include <linux/spinlock.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/memory.h>
28#include <linux/clk.h>
29#include <linux/platform_data/dma-mv_xor.h>
30
31#include "dmaengine.h"
32#include "mv_xor.h"
33
34static void mv_xor_issue_pending(struct dma_chan *chan);
35
36#define to_mv_xor_chan(chan) \
37 container_of(chan, struct mv_xor_chan, common)
38
39#define to_mv_xor_device(dev) \
40 container_of(dev, struct mv_xor_device, common)
41
42#define to_mv_xor_slot(tx) \
43 container_of(tx, struct mv_xor_desc_slot, async_tx)
44
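/*
 * Descriptor (re)initialisation: bit 31 of the status word flags the
 * descriptor as owned by the XOR engine and bit 31 of the command word
 * requests an end-of-descriptor interrupt (bit meanings as used by this
 * driver's hardware descriptor layout); the next-descriptor pointer is
 * cleared so the slot starts out unchained.
 */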
45static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
46{
47 struct mv_xor_desc *hw_desc = desc->hw_desc;
48
49 hw_desc->status = (1 << 31);
50 hw_desc->phy_next_desc = 0;
51 hw_desc->desc_command = (1 << 31);
52}
53
54static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
55{
56 struct mv_xor_desc *hw_desc = desc->hw_desc;
57 return hw_desc->phy_dest_addr;
58}
59
60static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
61 int src_idx)
62{
63 struct mv_xor_desc *hw_desc = desc->hw_desc;
64 return hw_desc->phy_src_addr[src_idx];
65}
66
67
68static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
69 u32 byte_count)
70{
71 struct mv_xor_desc *hw_desc = desc->hw_desc;
72 hw_desc->byte_count = byte_count;
73}
74
75static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
76 u32 next_desc_addr)
77{
78 struct mv_xor_desc *hw_desc = desc->hw_desc;
79 BUG_ON(hw_desc->phy_next_desc);
80 hw_desc->phy_next_desc = next_desc_addr;
81}
82
83static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
84{
85 struct mv_xor_desc *hw_desc = desc->hw_desc;
86 hw_desc->phy_next_desc = 0;
87}
88
89static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
90{
91 desc->value = val;
92}
93
94static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
95 dma_addr_t addr)
96{
97 struct mv_xor_desc *hw_desc = desc->hw_desc;
98 hw_desc->phy_dest_addr = addr;
99}
100
101static int mv_chan_memset_slot_count(size_t len)
102{
103 return 1;
104}
105
106#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
107
108static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
109 int index, dma_addr_t addr)
110{
111 struct mv_xor_desc *hw_desc = desc->hw_desc;
112 hw_desc->phy_src_addr[index] = addr;
113 if (desc->type == DMA_XOR)
114 hw_desc->desc_command |= (1 << index);
115}
116
117static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
118{
119 return __raw_readl(XOR_CURR_DESC(chan));
120}
121
122static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
123 u32 next_desc_addr)
124{
125 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
126}
127
128static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
129{
130 __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
131}
132
133static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
134{
135 __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
136}
137
138static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
139{
140 __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
141 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
142}
143
144static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
145{
146 u32 val = __raw_readl(XOR_INTR_MASK(chan));
147 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
148 __raw_writel(val, XOR_INTR_MASK(chan));
149}
150
151static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
152{
153 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
154 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
155 return intr_cause;
156}
157
158static int mv_is_err_intr(u32 intr_cause)
159{
160 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
161 return 1;
162
163 return 0;
164}
165
166static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
167{
168 u32 val = ~(1 << (chan->idx * 16));
169 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
170 __raw_writel(val, XOR_INTR_CAUSE(chan));
171}
172
173static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
174{
175 u32 val = 0xFFFF0000 >> (chan->idx * 16);
176 __raw_writel(val, XOR_INTR_CAUSE(chan));
177}
178
179static int mv_can_chain(struct mv_xor_desc_slot *desc)
180{
181 struct mv_xor_desc_slot *chain_old_tail = list_entry(
182 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
183
184 if (chain_old_tail->type != desc->type)
185 return 0;
186 if (desc->type == DMA_MEMSET)
187 return 0;
188
189 return 1;
190}
191
192static void mv_set_mode(struct mv_xor_chan *chan,
193 enum dma_transaction_type type)
194{
195 u32 op_mode;
196 u32 config = __raw_readl(XOR_CONFIG(chan));
197
198 switch (type) {
199 case DMA_XOR:
200 op_mode = XOR_OPERATION_MODE_XOR;
201 break;
202 case DMA_MEMCPY:
203 op_mode = XOR_OPERATION_MODE_MEMCPY;
204 break;
205 case DMA_MEMSET:
206 op_mode = XOR_OPERATION_MODE_MEMSET;
207 break;
208 default:
209 dev_err(chan->device->common.dev,
210 "error: unsupported operation %d.\n",
211 type);
212 BUG();
213 return;
214 }
215
216 config &= ~0x7;
217 config |= op_mode;
218 __raw_writel(config, XOR_CONFIG(chan));
219 chan->current_type = type;
220}
221
222static void mv_chan_activate(struct mv_xor_chan *chan)
223{
224 u32 activation;
225
226 dev_dbg(chan->device->common.dev, " activate chan.\n");
227 activation = __raw_readl(XOR_ACTIVATION(chan));
228 activation |= 0x1;
229 __raw_writel(activation, XOR_ACTIVATION(chan));
230}
231
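/*
 * Bits [5:4] of the activation register hold the channel state; the
 * driver treats state 1 as "busy", i.e. the channel is still working
 * through its current descriptor chain.
 */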
232static char mv_chan_is_busy(struct mv_xor_chan *chan)
233{
234 u32 state = __raw_readl(XOR_ACTIVATION(chan));
235
236 state = (state >> 4) & 0x3;
237
238 return (state == 1) ? 1 : 0;
239}
240
241static int mv_chan_xor_slot_count(size_t len, int src_cnt)
242{
243 return 1;
244}
245
246/**
247 * mv_xor_free_slots - flags descriptor slots for reuse
248 * @slot: Slot to free
249 * Caller must hold &mv_chan->lock while calling this function
250 */
251static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
252 struct mv_xor_desc_slot *slot)
253{
254 dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
255 __func__, __LINE__, slot);
256
257 slot->slots_per_op = 0;
258
259}
260
261/*
262 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
263 * sw_desc
264 * Caller must hold &mv_chan->lock while calling this function
265 */
266static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
267 struct mv_xor_desc_slot *sw_desc)
268{
269 dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
270 __func__, __LINE__, sw_desc);
271 if (sw_desc->type != mv_chan->current_type)
272 mv_set_mode(mv_chan, sw_desc->type);
273
274 if (sw_desc->type == DMA_MEMSET) {
275 /* memset requests program the engine registers directly;
276 * no hardware descriptors are used.
277 */
278 struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
279 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
280 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
281 mv_chan_set_value(mv_chan, sw_desc->value);
282 } else {
283 /* set the hardware chain */
284 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
285 }
286 mv_chan->pending += sw_desc->slot_cnt;
287 mv_xor_issue_pending(&mv_chan->common);
288}
289
290static dma_cookie_t
291mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
292 struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
293{
294 BUG_ON(desc->async_tx.cookie < 0);
295
296 if (desc->async_tx.cookie > 0) {
297 cookie = desc->async_tx.cookie;
298
299 /* call the callback (must not sleep or submit new
300 * operations to this channel)
301 */
302 if (desc->async_tx.callback)
303 desc->async_tx.callback(
304 desc->async_tx.callback_param);
305
306 /* unmap dma addresses
307 * (unmap_single vs unmap_page?)
308 */
309 if (desc->group_head && desc->unmap_len) {
310 struct mv_xor_desc_slot *unmap = desc->group_head;
311 struct device *dev =
312 &mv_chan->device->pdev->dev;
313 u32 len = unmap->unmap_len;
314 enum dma_ctrl_flags flags = desc->async_tx.flags;
315 u32 src_cnt;
316 dma_addr_t addr;
317 dma_addr_t dest;
318
319 src_cnt = unmap->unmap_src_cnt;
320 dest = mv_desc_get_dest_addr(unmap);
321 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
322 enum dma_data_direction dir;
323
324 if (src_cnt > 1) /* is xor ? */
325 dir = DMA_BIDIRECTIONAL;
326 else
327 dir = DMA_FROM_DEVICE;
328 dma_unmap_page(dev, dest, len, dir);
329 }
330
331 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
332 while (src_cnt--) {
333 addr = mv_desc_get_src_addr(unmap,
334 src_cnt);
335 if (addr == dest)
336 continue;
337 dma_unmap_page(dev, addr, len,
338 DMA_TO_DEVICE);
339 }
340 }
341 desc->group_head = NULL;
342 }
343 }
344
345 /* run dependent operations */
346 dma_run_dependencies(&desc->async_tx);
347
348 return cookie;
349}
350
351static int
352mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
353{
354 struct mv_xor_desc_slot *iter, *_iter;
355
356 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
357 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
358 completed_node) {
359
360 if (async_tx_test_ack(&iter->async_tx)) {
361 list_del(&iter->completed_node);
362 mv_xor_free_slots(mv_chan, iter);
363 }
364 }
365 return 0;
366}
367
368static int
369mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
370 struct mv_xor_chan *mv_chan)
371{
372 dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
373 __func__, __LINE__, desc, desc->async_tx.flags);
374 list_del(&desc->chain_node);
375 /* the client is allowed to attach dependent operations
376 * until 'ack' is set
377 */
378 if (!async_tx_test_ack(&desc->async_tx)) {
379 /* move this slot to the completed_slots */
380 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
381 return 0;
382 }
383
384 mv_xor_free_slots(mv_chan, desc);
385 return 0;
386}
387
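/*
 * Walk the descriptor chain from the oldest entry, running completion
 * callbacks and unmapping buffers, and stop at the descriptor currently
 * loaded in the hardware. If the channel has gone idle while work is
 * still queued, restart it on the new chain head.
 */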
388static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
389{
390 struct mv_xor_desc_slot *iter, *_iter;
391 dma_cookie_t cookie = 0;
392 int busy = mv_chan_is_busy(mv_chan);
393 u32 current_desc = mv_chan_get_current_desc(mv_chan);
394 int seen_current = 0;
395
396 dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
397 dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
398 mv_xor_clean_completed_slots(mv_chan);
399
400 /* free completed slots from the chain starting with
401 * the oldest descriptor
402 */
403
404 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
405 chain_node) {
406 prefetch(_iter);
407 prefetch(&_iter->async_tx);
408
409 /* do not advance past the current descriptor loaded into the
410 * hardware channel, subsequent descriptors are either in
411 * process or have not been submitted
412 */
413 if (seen_current)
414 break;
415
416 /* stop the search if we reach the current descriptor and the
417 * channel is busy
418 */
419 if (iter->async_tx.phys == current_desc) {
420 seen_current = 1;
421 if (busy)
422 break;
423 }
424
425 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
426
427 if (mv_xor_clean_slot(iter, mv_chan))
428 break;
429 }
430
431 if ((busy == 0) && !list_empty(&mv_chan->chain)) {
432 struct mv_xor_desc_slot *chain_head;
433 chain_head = list_entry(mv_chan->chain.next,
434 struct mv_xor_desc_slot,
435 chain_node);
436
437 mv_xor_start_new_chain(mv_chan, chain_head);
438 }
439
440 if (cookie > 0)
441 mv_chan->common.completed_cookie = cookie;
442}
443
444static void
445mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
446{
447 spin_lock_bh(&mv_chan->lock);
448 __mv_xor_slot_cleanup(mv_chan);
449 spin_unlock_bh(&mv_chan->lock);
450}
451
452static void mv_xor_tasklet(unsigned long data)
453{
454 struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
455 mv_xor_slot_cleanup(chan);
456}
457
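/*
 * Search for num_slots contiguous free descriptor slots, starting at the
 * last allocated slot and wrapping to the head of the list once; on
 * failure the cleanup tasklet is scheduled in the hope that completed
 * slots can be reclaimed for a later retry.
 */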
458static struct mv_xor_desc_slot *
459mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
460 int slots_per_op)
461{
462 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
463 LIST_HEAD(chain);
464 int slots_found, retry = 0;
465
466 /* start the search from the last allocated descriptor;
467 * if a contiguous allocation cannot be found, start searching
468 * from the beginning of the list
469 */
470retry:
471 slots_found = 0;
472 if (retry == 0)
473 iter = mv_chan->last_used;
474 else
475 iter = list_entry(&mv_chan->all_slots,
476 struct mv_xor_desc_slot,
477 slot_node);
478
479 list_for_each_entry_safe_continue(
480 iter, _iter, &mv_chan->all_slots, slot_node) {
481 prefetch(_iter);
482 prefetch(&_iter->async_tx);
483 if (iter->slots_per_op) {
484 /* give up after finding the first busy slot
485 * on the second pass through the list
486 */
487 if (retry)
488 break;
489
490 slots_found = 0;
491 continue;
492 }
493
494 /* start the allocation if the slot is correctly aligned */
495 if (!slots_found++)
496 alloc_start = iter;
497
498 if (slots_found == num_slots) {
499 struct mv_xor_desc_slot *alloc_tail = NULL;
500 struct mv_xor_desc_slot *last_used = NULL;
501 iter = alloc_start;
502 while (num_slots) {
503 int i;
504
505 /* pre-ack all but the last descriptor */
506 async_tx_ack(&iter->async_tx);
507
508 list_add_tail(&iter->chain_node, &chain);
509 alloc_tail = iter;
510 iter->async_tx.cookie = 0;
511 iter->slot_cnt = num_slots;
512 iter->xor_check_result = NULL;
513 for (i = 0; i < slots_per_op; i++) {
514 iter->slots_per_op = slots_per_op - i;
515 last_used = iter;
516 iter = list_entry(iter->slot_node.next,
517 struct mv_xor_desc_slot,
518 slot_node);
519 }
520 num_slots -= slots_per_op;
521 }
522 alloc_tail->group_head = alloc_start;
523 alloc_tail->async_tx.cookie = -EBUSY;
524 list_splice(&chain, &alloc_tail->tx_list);
525 mv_chan->last_used = last_used;
526 mv_desc_clear_next_desc(alloc_start);
527 mv_desc_clear_next_desc(alloc_tail);
528 return alloc_tail;
529 }
530 }
531 if (!retry++)
532 goto retry;
533
534 /* try to free some slots if the allocation fails */
535 tasklet_schedule(&mv_chan->irq_tasklet);
536
537 return NULL;
538}
539
540/************************ DMA engine API functions ****************************/
541static dma_cookie_t
542mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
543{
544 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
545 struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
546 struct mv_xor_desc_slot *grp_start, *old_chain_tail;
547 dma_cookie_t cookie;
548 int new_hw_chain = 1;
549
550 dev_dbg(mv_chan->device->common.dev,
551 "%s sw_desc %p: async_tx %p\n",
552 __func__, sw_desc, &sw_desc->async_tx);
553
554 grp_start = sw_desc->group_head;
555
556 spin_lock_bh(&mv_chan->lock);
557 cookie = dma_cookie_assign(tx);
558
559 if (list_empty(&mv_chan->chain))
560 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
561 else {
562 new_hw_chain = 0;
563
564 old_chain_tail = list_entry(mv_chan->chain.prev,
565 struct mv_xor_desc_slot,
566 chain_node);
567 list_splice_init(&grp_start->tx_list,
568 &old_chain_tail->chain_node);
569
570 if (!mv_can_chain(grp_start))
571 goto submit_done;
572
573 dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
574 old_chain_tail->async_tx.phys);
575
576 /* fix up the hardware chain */
577 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
578
579 /* if the channel is not busy */
580 if (!mv_chan_is_busy(mv_chan)) {
581 u32 current_desc = mv_chan_get_current_desc(mv_chan);
582 /*
583 * and the current desc is the end of the chain before
584 * the append, then we need to start the channel
585 */
586 if (current_desc == old_chain_tail->async_tx.phys)
587 new_hw_chain = 1;
588 }
589 }
590
591 if (new_hw_chain)
592 mv_xor_start_new_chain(mv_chan, grp_start);
593
594submit_done:
595 spin_unlock_bh(&mv_chan->lock);
596
597 return cookie;
598}
599
600/* returns the number of allocated descriptors */
601static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
602{
603 char *hw_desc;
604 int idx;
605 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
606 struct mv_xor_desc_slot *slot = NULL;
607 int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE;
608
609 /* Allocate descriptor slots */
610 idx = mv_chan->slots_allocated;
611 while (idx < num_descs_in_pool) {
612 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
613 if (!slot) {
614 printk(KERN_INFO "MV XOR Channel only initialized"
615 " %d descriptor slots", idx);
616 break;
617 }
618 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
619 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
620
621 dma_async_tx_descriptor_init(&slot->async_tx, chan);
622 slot->async_tx.tx_submit = mv_xor_tx_submit;
623 INIT_LIST_HEAD(&slot->chain_node);
624 INIT_LIST_HEAD(&slot->slot_node);
625 INIT_LIST_HEAD(&slot->tx_list);
626 hw_desc = (char *) mv_chan->device->dma_desc_pool;
627 slot->async_tx.phys =
628 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
629 slot->idx = idx++;
630
631 spin_lock_bh(&mv_chan->lock);
632 mv_chan->slots_allocated = idx;
633 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
634 spin_unlock_bh(&mv_chan->lock);
635 }
636
637 if (mv_chan->slots_allocated && !mv_chan->last_used)
638 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
639 struct mv_xor_desc_slot,
640 slot_node);
641
642 dev_dbg(mv_chan->device->common.dev,
643 "allocated %d descriptor slots last_used: %p\n",
644 mv_chan->slots_allocated, mv_chan->last_used);
645
646 return mv_chan->slots_allocated ? : -ENOMEM;
647}
648
649static struct dma_async_tx_descriptor *
650mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
651 size_t len, unsigned long flags)
652{
653 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
654 struct mv_xor_desc_slot *sw_desc, *grp_start;
655 int slot_cnt;
656
657 dev_dbg(mv_chan->device->common.dev,
658 "%s dest: %x src %x len: %u flags: %ld\n",
659 __func__, dest, src, len, flags);
660 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
661 return NULL;
662
663 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
664
665 spin_lock_bh(&mv_chan->lock);
666 slot_cnt = mv_chan_memcpy_slot_count(len);
667 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
668 if (sw_desc) {
669 sw_desc->type = DMA_MEMCPY;
670 sw_desc->async_tx.flags = flags;
671 grp_start = sw_desc->group_head;
672 mv_desc_init(grp_start, flags);
673 mv_desc_set_byte_count(grp_start, len);
674 mv_desc_set_dest_addr(sw_desc->group_head, dest);
675 mv_desc_set_src_addr(grp_start, 0, src);
676 sw_desc->unmap_src_cnt = 1;
677 sw_desc->unmap_len = len;
678 }
679 spin_unlock_bh(&mv_chan->lock);
680
681 dev_dbg(mv_chan->device->common.dev,
682 "%s sw_desc %p async_tx %p\n",
683 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
684
685 return sw_desc ? &sw_desc->async_tx : NULL;
686}
687
688static struct dma_async_tx_descriptor *
689mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
690 size_t len, unsigned long flags)
691{
692 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
693 struct mv_xor_desc_slot *sw_desc, *grp_start;
694 int slot_cnt;
695
696 dev_dbg(mv_chan->device->common.dev,
697 "%s dest: %x len: %u flags: %ld\n",
698 __func__, dest, len, flags);
699 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
700 return NULL;
701
702 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
703
704 spin_lock_bh(&mv_chan->lock);
705 slot_cnt = mv_chan_memset_slot_count(len);
706 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
707 if (sw_desc) {
708 sw_desc->type = DMA_MEMSET;
709 sw_desc->async_tx.flags = flags;
710 grp_start = sw_desc->group_head;
711 mv_desc_init(grp_start, flags);
712 mv_desc_set_byte_count(grp_start, len);
713 mv_desc_set_dest_addr(sw_desc->group_head, dest);
714 mv_desc_set_block_fill_val(grp_start, value);
715 sw_desc->unmap_src_cnt = 1;
716 sw_desc->unmap_len = len;
717 }
718 spin_unlock_bh(&mv_chan->lock);
719 dev_dbg(mv_chan->device->common.dev,
720 "%s sw_desc %p async_tx %p \n",
721 __func__, sw_desc, &sw_desc->async_tx);
722 return sw_desc ? &sw_desc->async_tx : NULL;
723}
724
725static struct dma_async_tx_descriptor *
726mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
727 unsigned int src_cnt, size_t len, unsigned long flags)
728{
729 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
730 struct mv_xor_desc_slot *sw_desc, *grp_start;
731 int slot_cnt;
732
733 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
734 return NULL;
735
736 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
737
738 dev_dbg(mv_chan->device->common.dev,
739 "%s src_cnt: %d len: dest %x %u flags: %ld\n",
740 __func__, src_cnt, len, dest, flags);
741
742 spin_lock_bh(&mv_chan->lock);
743 slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
744 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
745 if (sw_desc) {
746 sw_desc->type = DMA_XOR;
747 sw_desc->async_tx.flags = flags;
748 grp_start = sw_desc->group_head;
749 mv_desc_init(grp_start, flags);
750 /* the byte count field is the same as in memcpy desc*/
751 mv_desc_set_byte_count(grp_start, len);
752 mv_desc_set_dest_addr(sw_desc->group_head, dest);
753 sw_desc->unmap_src_cnt = src_cnt;
754 sw_desc->unmap_len = len;
755 while (src_cnt--)
756 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
757 }
758 spin_unlock_bh(&mv_chan->lock);
759 dev_dbg(mv_chan->device->common.dev,
760 "%s sw_desc %p async_tx %p \n",
761 __func__, sw_desc, &sw_desc->async_tx);
762 return sw_desc ? &sw_desc->async_tx : NULL;
763}
764
765static void mv_xor_free_chan_resources(struct dma_chan *chan)
766{
767 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
768 struct mv_xor_desc_slot *iter, *_iter;
769 int in_use_descs = 0;
770
771 mv_xor_slot_cleanup(mv_chan);
772
773 spin_lock_bh(&mv_chan->lock);
774 list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
775 chain_node) {
776 in_use_descs++;
777 list_del(&iter->chain_node);
778 }
779 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
780 completed_node) {
781 in_use_descs++;
782 list_del(&iter->completed_node);
783 }
784 list_for_each_entry_safe_reverse(
785 iter, _iter, &mv_chan->all_slots, slot_node) {
786 list_del(&iter->slot_node);
787 kfree(iter);
788 mv_chan->slots_allocated--;
789 }
790 mv_chan->last_used = NULL;
791
792 dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
793 __func__, mv_chan->slots_allocated);
794 spin_unlock_bh(&mv_chan->lock);
795
796 if (in_use_descs)
797 dev_err(mv_chan->device->common.dev,
798 "freeing %d in use descriptors!\n", in_use_descs);
799}
800
801/**
802 * mv_xor_status - poll the status of an XOR transaction
803 * @chan: XOR channel handle
804 * @cookie: XOR transaction identifier
805 * @txstate: XOR transaction state holder (or NULL)
806 */
807static enum dma_status mv_xor_status(struct dma_chan *chan,
808 dma_cookie_t cookie,
809 struct dma_tx_state *txstate)
810{
811 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
812 enum dma_status ret;
813
814 ret = dma_cookie_status(chan, cookie, txstate);
815 if (ret == DMA_SUCCESS) {
816 mv_xor_clean_completed_slots(mv_chan);
817 return ret;
818 }
819 mv_xor_slot_cleanup(mv_chan);
820
821 return dma_cookie_status(chan, cookie, txstate);
822}
823
824static void mv_dump_xor_regs(struct mv_xor_chan *chan)
825{
826 u32 val;
827
828 val = __raw_readl(XOR_CONFIG(chan));
829 dev_err(chan->device->common.dev,
830 "config 0x%08x.\n", val);
831
832 val = __raw_readl(XOR_ACTIVATION(chan));
833 dev_err(chan->device->common.dev,
834 "activation 0x%08x.\n", val);
835
836 val = __raw_readl(XOR_INTR_CAUSE(chan));
837 dev_err(chan->device->common.dev,
838 "intr cause 0x%08x.\n", val);
839
840 val = __raw_readl(XOR_INTR_MASK(chan));
841 dev_err(chan->device->common.dev,
842 "intr mask 0x%08x.\n", val);
843
844 val = __raw_readl(XOR_ERROR_CAUSE(chan));
845 dev_err(chan->device->common.dev,
846 "error cause 0x%08x.\n", val);
847
848 val = __raw_readl(XOR_ERROR_ADDR(chan));
849 dev_err(chan->device->common.dev,
850 "error addr 0x%08x.\n", val);
851}
852
853static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
854 u32 intr_cause)
855{
856 if (intr_cause & (1 << 4)) {
857 dev_dbg(chan->device->common.dev,
858 "ignore this error\n");
859 return;
860 }
861
862 dev_err(chan->device->common.dev,
863 "error on chan %d. intr cause 0x%08x.\n",
864 chan->idx, intr_cause);
865
866 mv_dump_xor_regs(chan);
867 BUG();
868}
869
870static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
871{
872 struct mv_xor_chan *chan = data;
873 u32 intr_cause = mv_chan_get_intr_cause(chan);
874
875 dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
876
877 if (mv_is_err_intr(intr_cause))
878 mv_xor_err_interrupt_handler(chan, intr_cause);
879
880 tasklet_schedule(&chan->irq_tasklet);
881
882 mv_xor_device_clear_eoc_cause(chan);
883
884 return IRQ_HANDLED;
885}
886
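/*
 * Submitted descriptors are batched: the activation register is only
 * poked once at least MV_XOR_THRESHOLD descriptors are pending, which
 * amortises the cost of kicking the engine.
 */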
887static void mv_xor_issue_pending(struct dma_chan *chan)
888{
889 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
890
891 if (mv_chan->pending >= MV_XOR_THRESHOLD) {
892 mv_chan->pending = 0;
893 mv_chan_activate(mv_chan);
894 }
895}
896
897/*
898 * Perform a transaction to verify the HW works.
899 */
900#define MV_XOR_TEST_SIZE 2000
901
902static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
903{
904 int i;
905 void *src, *dest;
906 dma_addr_t src_dma, dest_dma;
907 struct dma_chan *dma_chan;
908 dma_cookie_t cookie;
909 struct dma_async_tx_descriptor *tx;
910 int err = 0;
911 struct mv_xor_chan *mv_chan;
912
913 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
914 if (!src)
915 return -ENOMEM;
916
917 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
918 if (!dest) {
919 kfree(src);
920 return -ENOMEM;
921 }
922
923 /* Fill in src buffer */
924 for (i = 0; i < MV_XOR_TEST_SIZE; i++)
925 ((u8 *) src)[i] = (u8)i;
926
927 /* Start copy, using first DMA channel */
928 dma_chan = container_of(device->common.channels.next,
929 struct dma_chan,
930 device_node);
931 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
932 err = -ENODEV;
933 goto out;
934 }
935
936 dest_dma = dma_map_single(dma_chan->device->dev, dest,
937 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
938
939 src_dma = dma_map_single(dma_chan->device->dev, src,
940 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
941
942 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
943 MV_XOR_TEST_SIZE, 0);
944 cookie = mv_xor_tx_submit(tx);
945 mv_xor_issue_pending(dma_chan);
946 async_tx_ack(tx);
947 msleep(1);
948
949 if (mv_xor_status(dma_chan, cookie, NULL) !=
950 DMA_SUCCESS) {
951 dev_err(dma_chan->device->dev,
952 "Self-test copy timed out, disabling\n");
953 err = -ENODEV;
954 goto free_resources;
955 }
956
957 mv_chan = to_mv_xor_chan(dma_chan);
958 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
959 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
960 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
961 dev_err(dma_chan->device->dev,
962 "Self-test copy failed compare, disabling\n");
963 err = -ENODEV;
964 goto free_resources;
965 }
966
967free_resources:
968 mv_xor_free_chan_resources(dma_chan);
969out:
970 kfree(src);
971 kfree(dest);
972 return err;
973}
974
975#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
976static int __devinit
977mv_xor_xor_self_test(struct mv_xor_device *device)
978{
979 int i, src_idx;
980 struct page *dest;
981 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
982 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
983 dma_addr_t dest_dma;
984 struct dma_async_tx_descriptor *tx;
985 struct dma_chan *dma_chan;
986 dma_cookie_t cookie;
987 u8 cmp_byte = 0;
988 u32 cmp_word;
989 int err = 0;
990 struct mv_xor_chan *mv_chan;
991
992 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
993 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
994 if (!xor_srcs[src_idx]) {
995 while (src_idx--)
996 __free_page(xor_srcs[src_idx]);
997 return -ENOMEM;
998 }
999 }
1000
1001 dest = alloc_page(GFP_KERNEL);
1002 if (!dest) {
1003 while (src_idx--)
1004 __free_page(xor_srcs[src_idx]);
1005 return -ENOMEM;
1006 }
1007
1008 /* Fill in src buffers */
1009 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
1010 u8 *ptr = page_address(xor_srcs[src_idx]);
1011 for (i = 0; i < PAGE_SIZE; i++)
1012 ptr[i] = (1 << src_idx);
1013 }
1014
1015 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
1016 cmp_byte ^= (u8) (1 << src_idx);
1017
1018 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1019 (cmp_byte << 8) | cmp_byte;
1020
1021 memset(page_address(dest), 0, PAGE_SIZE);
1022
1023 dma_chan = container_of(device->common.channels.next,
1024 struct dma_chan,
1025 device_node);
1026 if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
1027 err = -ENODEV;
1028 goto out;
1029 }
1030
1031 /* test xor */
1032 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
1033 DMA_FROM_DEVICE);
1034
1035 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
1036 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
1037 0, PAGE_SIZE, DMA_TO_DEVICE);
1038
1039 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1040 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
1041
1042 cookie = mv_xor_tx_submit(tx);
1043 mv_xor_issue_pending(dma_chan);
1044 async_tx_ack(tx);
1045 msleep(8);
1046
1047 if (mv_xor_status(dma_chan, cookie, NULL) !=
1048 DMA_SUCCESS) {
1049 dev_err(dma_chan->device->dev,
1050 "Self-test xor timed out, disabling\n");
1051 err = -ENODEV;
1052 goto free_resources;
1053 }
1054
1055 mv_chan = to_mv_xor_chan(dma_chan);
1056 dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
1057 PAGE_SIZE, DMA_FROM_DEVICE);
1058 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1059 u32 *ptr = page_address(dest);
1060 if (ptr[i] != cmp_word) {
1061 dev_err(dma_chan->device->dev,
1062 "Self-test xor failed compare, disabling."
1063 " index %d, data %x, expected %x\n", i,
1064 ptr[i], cmp_word);
1065 err = -ENODEV;
1066 goto free_resources;
1067 }
1068 }
1069
1070free_resources:
1071 mv_xor_free_chan_resources(dma_chan);
1072out:
1073 src_idx = MV_XOR_NUM_SRC_TEST;
1074 while (src_idx--)
1075 __free_page(xor_srcs[src_idx]);
1076 __free_page(dest);
1077 return err;
1078}
1079
1080static int mv_xor_channel_remove(struct mv_xor_device *device)
1081{
1082 struct dma_chan *chan, *_chan;
1083 struct mv_xor_chan *mv_chan;
1084
1085 dma_async_device_unregister(&device->common);
1086
1087 dma_free_coherent(&device->pdev->dev, device->pool_size,
1088 device->dma_desc_pool_virt, device->dma_desc_pool);
1089
1090 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1091 device_node) {
1092 mv_chan = to_mv_xor_chan(chan);
1093 list_del(&chan->device_node);
1094 }
1095
1096 return 0;
1097}
1098
1099static struct mv_xor_device *
1100mv_xor_channel_add(struct mv_xor_shared_private *msp,
1101 struct platform_device *pdev,
1102 int hw_id, dma_cap_mask_t cap_mask,
1103 size_t pool_size, int irq)
1104{
1105 int ret = 0;
1106 struct mv_xor_device *adev;
1107 struct mv_xor_chan *mv_chan;
1108 struct dma_device *dma_dev;
1109
1110 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
1111 if (!adev)
1112 return ERR_PTR(-ENOMEM);
1113
1114 dma_dev = &adev->common;
1115
1116 /* allocate coherent memory for hardware descriptors
1117 * note: writecombine gives slightly better performance, but
1118 * requires that we explicitly flush the writes
1119 */
1120 adev->pool_size = pool_size;
1121 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1122 adev->pool_size,
1123 &adev->dma_desc_pool,
1124 GFP_KERNEL);
1125 if (!adev->dma_desc_pool_virt)
1126 return ERR_PTR(-ENOMEM);
1127
1128 adev->id = hw_id;
1129
1130 /* discover transaction capabilities from the platform data */
1131 dma_dev->cap_mask = cap_mask;
1132 adev->pdev = pdev;
1133 adev->shared = msp;
1134
1135 INIT_LIST_HEAD(&dma_dev->channels);
1136
1137 /* set base routines */
1138 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1139 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1140 dma_dev->device_tx_status = mv_xor_status;
1141 dma_dev->device_issue_pending = mv_xor_issue_pending;
1142 dma_dev->dev = &pdev->dev;
1143
1144 /* set prep routines based on capability */
1145 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1146 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1147 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1148 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
1149 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1150 dma_dev->max_xor = 8;
1151 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1152 }
1153
1154 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
1155 if (!mv_chan) {
1156 ret = -ENOMEM;
1157 goto err_free_dma;
1158 }
1159 mv_chan->device = adev;
1160 mv_chan->idx = hw_id;
1161 mv_chan->mmr_base = adev->shared->xor_base;
1162
1163 if (!mv_chan->mmr_base) {
1164 ret = -ENOMEM;
1165 goto err_free_dma;
1166 }
1167 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1168 mv_chan);
1169
1170 /* clear errors before enabling interrupts */
1171 mv_xor_device_clear_err_status(mv_chan);
1172
1173 ret = devm_request_irq(&pdev->dev, irq,
1174 mv_xor_interrupt_handler,
1175 0, dev_name(&pdev->dev), mv_chan);
1176 if (ret)
1177 goto err_free_dma;
1178
1179 mv_chan_unmask_interrupts(mv_chan);
1180
1181 mv_set_mode(mv_chan, DMA_MEMCPY);
1182
1183 spin_lock_init(&mv_chan->lock);
1184 INIT_LIST_HEAD(&mv_chan->chain);
1185 INIT_LIST_HEAD(&mv_chan->completed_slots);
1186 INIT_LIST_HEAD(&mv_chan->all_slots);
1187 mv_chan->common.device = dma_dev;
1188 dma_cookie_init(&mv_chan->common);
1189
1190 list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
1191
1192 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1193 ret = mv_xor_memcpy_self_test(adev);
1194 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1195 if (ret)
1196 goto err_free_dma;
1197 }
1198
1199 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1200 ret = mv_xor_xor_self_test(adev);
1201 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1202 if (ret)
1203 goto err_free_dma;
1204 }
1205
1206 dev_info(&pdev->dev, "Marvell XOR: "
1207 "( %s%s%s%s)\n",
1208 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1209 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
1210 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1211 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1212
1213 dma_async_device_register(dma_dev);
1214 return adev;
1215
1216 err_free_dma:
1217 dma_free_coherent(&adev->pdev->dev, pool_size,
1218 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1219 return ERR_PTR(ret);
1220}
1221
1222static int __devexit mv_xor_remove(struct platform_device *pdev)
1223{
1224 struct mv_xor_device *device = platform_get_drvdata(pdev);
1225 return mv_xor_channel_remove(device);
1226}
1227
1228static int __devinit mv_xor_probe(struct platform_device *pdev)
1229{
1230 struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
1231 struct mv_xor_shared_private *msp =
1232 platform_get_drvdata(plat_data->shared);
1233 struct mv_xor_device *mv_xor_device;
1234 int irq;
1235
1236 irq = platform_get_irq(pdev, 0);
1237 if (irq < 0)
1238 return irq;
1239
1240 mv_xor_device = mv_xor_channel_add(msp, pdev, plat_data->hw_id,
1241 plat_data->cap_mask,
1242 plat_data->pool_size, irq);
1243 if (IS_ERR(mv_xor_device))
1244 return PTR_ERR(mv_xor_device);
1245
1246 platform_set_drvdata(pdev, mv_xor_device);
1247
1248 return 0;
1249}
1250
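/*
 * Program the XOR engine's address decoding windows from the MBUS DRAM
 * layout: all eight windows are cleared first, then one window per DRAM
 * chip-select is opened and enabled for both XOR units.
 */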
1251static void
1252mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
1253 const struct mbus_dram_target_info *dram)
1254{
1255 void __iomem *base = msp->xor_base;
1256 u32 win_enable = 0;
1257 int i;
1258
1259 for (i = 0; i < 8; i++) {
1260 writel(0, base + WINDOW_BASE(i));
1261 writel(0, base + WINDOW_SIZE(i));
1262 if (i < 4)
1263 writel(0, base + WINDOW_REMAP_HIGH(i));
1264 }
1265
1266 for (i = 0; i < dram->num_cs; i++) {
1267 const struct mbus_dram_window *cs = dram->cs + i;
1268
1269 writel((cs->base & 0xffff0000) |
1270 (cs->mbus_attr << 8) |
1271 dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1272 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1273
1274 win_enable |= (1 << i);
1275 win_enable |= 3 << (16 + (2 * i));
1276 }
1277
1278 writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1279 writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1280}
1281
1282static struct platform_driver mv_xor_driver = {
1283 .probe = mv_xor_probe,
1284 .remove = __devexit_p(mv_xor_remove),
1285 .driver = {
1286 .owner = THIS_MODULE,
1287 .name = MV_XOR_NAME,
1288 },
1289};
1290
1291static int mv_xor_shared_probe(struct platform_device *pdev)
1292{
1293 const struct mbus_dram_target_info *dram;
1294 struct mv_xor_shared_private *msp;
1295 struct mv_xor_shared_platform_data *pdata = pdev->dev.platform_data;
1296 struct resource *res;
1297 int i, ret;
1298
1299 dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1300
1301 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
1302 if (!msp)
1303 return -ENOMEM;
1304
1305 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1306 if (!res)
1307 return -ENODEV;
1308
1309 msp->xor_base = devm_ioremap(&pdev->dev, res->start,
1310 resource_size(res));
1311 if (!msp->xor_base)
1312 return -EBUSY;
1313
1314 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1315 if (!res)
1316 return -ENODEV;
1317
1318 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1319 resource_size(res));
1320 if (!msp->xor_high_base)
1321 return -EBUSY;
1322
1323 platform_set_drvdata(pdev, msp);
1324
1325 /*
1326 * (Re-)program MBUS remapping windows if we are asked to.
1327 */
1328 dram = mv_mbus_dram_info();
1329 if (dram)
1330 mv_xor_conf_mbus_windows(msp, dram);
1331
1332 /* Not all platforms can gate the clock, so it is not
1333 * an error if the clock does not exist.
1334 */
1335 msp->clk = clk_get(&pdev->dev, NULL);
1336 if (!IS_ERR(msp->clk))
1337 clk_prepare_enable(msp->clk);
1338
1339 if (pdata && pdata->channels) {
1340 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1341 struct mv_xor_platform_data *cd;
1342 int irq;
1343
1344 cd = &pdata->channels[i];
1345 if (!cd) {
1346 ret = -ENODEV;
1347 goto err_channel_add;
1348 }
1349
1350 irq = platform_get_irq(pdev, i);
1351 if (irq < 0) {
1352 ret = irq;
1353 goto err_channel_add;
1354 }
1355
1356 msp->channels[i] =
1357 mv_xor_channel_add(msp, pdev, cd->hw_id,
1358 cd->cap_mask,
1359 cd->pool_size, irq);
1360 if (IS_ERR(msp->channels[i])) {
1361 ret = PTR_ERR(msp->channels[i]);
1362 goto err_channel_add;
1363 }
1364 }
1365 }
1366
1367 return 0;
1368
1369err_channel_add:
1370 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1371 if (msp->channels[i])
1372 mv_xor_channel_remove(msp->channels[i]);
1373
1374 clk_disable_unprepare(msp->clk);
1375 clk_put(msp->clk);
1376 return ret;
1377}
1378
1379static int mv_xor_shared_remove(struct platform_device *pdev)
1380{
1381 struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
1382 int i;
1383
1384 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1385 if (msp->channels[i])
1386 mv_xor_channel_remove(msp->channels[i]);
1387 }
1388
1389 if (!IS_ERR(msp->clk)) {
1390 clk_disable_unprepare(msp->clk);
1391 clk_put(msp->clk);
1392 }
1393
1394 return 0;
1395}
1396
1397static struct platform_driver mv_xor_shared_driver = {
1398 .probe = mv_xor_shared_probe,
1399 .remove = mv_xor_shared_remove,
1400 .driver = {
1401 .owner = THIS_MODULE,
1402 .name = MV_XOR_SHARED_NAME,
1403 },
1404};
1405
1406
1407static int __init mv_xor_init(void)
1408{
1409 int rc;
1410
1411 rc = platform_driver_register(&mv_xor_shared_driver);
1412 if (!rc) {
1413 rc = platform_driver_register(&mv_xor_driver);
1414 if (rc)
1415 platform_driver_unregister(&mv_xor_shared_driver);
1416 }
1417 return rc;
1418}
1419module_init(mv_xor_init);
1420
1421/* it's currently unsafe to unload this module */
1422#if 0
1423static void __exit mv_xor_exit(void)
1424{
1425 platform_driver_unregister(&mv_xor_driver);
1426 platform_driver_unregister(&mv_xor_shared_driver);
1427 return;
1428}
1429
1430module_exit(mv_xor_exit);
1431#endif
1432
1433MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1434MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1435MODULE_LICENSE("GPL");