/*
 * offload engine driver for the Intel Xscale series of i/o processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>

#include <mach/adma.h>

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
	container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
	container_of(tx, struct iop_adma_desc_slot, async_tx)

/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
	int stride = slot->slots_per_op;

	while (stride--) {
		slot->slots_per_op = 0;
		slot = list_entry(slot->slot_node.next,
				struct iop_adma_desc_slot,
				slot_node);
	}
}

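/* Run the cleanup actions for a completed descriptor: invoke the client
 * callback, unmap the DMA addresses unless the client asked us to skip
 * that step, and kick off any dependent operations.  Returns the cookie
 * of the most recently completed transaction.
 */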
static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *tx = &desc->async_tx;

	BUG_ON(tx->cookie < 0);
	if (tx->cookie > 0) {
		cookie = tx->cookie;
		tx->cookie = 0;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (tx->callback)
			tx->callback(tx->callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct iop_adma_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&iop_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = tx->flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = iop_desc_get_dest_addr(unmap, iop_chan);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;

				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = iop_desc_get_src_addr(unmap,
								iop_chan,
								src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(tx);

	return cookie;
}

static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
	struct iop_adma_chan *iop_chan)
{
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx))
		return 0;

	/* leave the last descriptor in the chain
	 * so we can append to it
	 */
	if (desc->chain_node.next == &iop_chan->chain)
		return 1;

	dev_dbg(iop_chan->device->common.dev,
		"\tfree slot: %d slots_per_op: %d\n",
		desc->idx, desc->slots_per_op);

	list_del(&desc->chain_node);
	iop_adma_free_slots(desc);

	return 0;
}

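/* Walk the descriptor chain from the oldest entry, run completion actions
 * for descriptors the hardware has finished with (collecting zero-sum
 * results for multi-slot groups), and return their slots to the free pool.
 * Caller must hold iop_chan->lock.
 */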
static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
	dma_cookie_t cookie = 0;
	u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
	int busy = iop_chan_is_busy(iop_chan);
	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		pr_debug("\tcookie: %d slot: %d busy: %d "
			"this_desc: %#x next_desc: %#x ack: %d\n",
			iter->async_tx.cookie, iter->idx, busy,
			iter->async_tx.phys, iop_desc_get_next_desc(iter),
			async_tx_test_ack(&iter->async_tx));
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy, or if it appears that the current descriptor
		 * needs to be re-read (i.e. has been appended to)
		 */
		if (iter->async_tx.phys == current_desc) {
			BUG_ON(seen_current++);
			if (busy || iop_desc_get_next_desc(iter))
				break;
		}

		/* detect the start of a group transaction */
		if (!slot_cnt && !slots_per_op) {
			slot_cnt = iter->slot_cnt;
			slots_per_op = iter->slots_per_op;
			if (slot_cnt <= slots_per_op) {
				slot_cnt = 0;
				slots_per_op = 0;
			}
		}

		if (slot_cnt) {
			pr_debug("\tgroup++\n");
			if (!grp_start)
				grp_start = iter;
			slot_cnt -= slots_per_op;
		}

		/* all the members of a group are complete */
		if (slots_per_op != 0 && slot_cnt == 0) {
			struct iop_adma_desc_slot *grp_iter, *_grp_iter;
			int end_of_chain = 0;
			pr_debug("\tgroup end\n");

			/* collect the total results */
			if (grp_start->xor_check_result) {
				u32 zero_sum_result = 0;
				slot_cnt = grp_start->slot_cnt;
				grp_iter = grp_start;

				list_for_each_entry_from(grp_iter,
					&iop_chan->chain, chain_node) {
					zero_sum_result |=
					    iop_desc_get_zero_result(grp_iter);
					pr_debug("\titer%d result: %d\n",
					    grp_iter->idx, zero_sum_result);
					slot_cnt -= slots_per_op;
					if (slot_cnt == 0)
						break;
				}
				pr_debug("\tgrp_start->xor_check_result: %p\n",
					grp_start->xor_check_result);
				*grp_start->xor_check_result = zero_sum_result;
			}

			/* clean up the group */
			slot_cnt = grp_start->slot_cnt;
			grp_iter = grp_start;
			list_for_each_entry_safe_from(grp_iter, _grp_iter,
				&iop_chan->chain, chain_node) {
				cookie = iop_adma_run_tx_complete_actions(
					grp_iter, iop_chan, cookie);

				slot_cnt -= slots_per_op;
				end_of_chain = iop_adma_clean_slot(grp_iter,
					iop_chan);

				if (slot_cnt == 0 || end_of_chain)
					break;
			}

			/* the group should be complete at this point */
			BUG_ON(slot_cnt);

			slots_per_op = 0;
			grp_start = NULL;
			if (end_of_chain)
				break;
			else
				continue;
		} else if (slots_per_op) /* wait for group completion */
			continue;

		/* write back zero sum results (single descriptor case) */
		if (iter->xor_check_result && iter->async_tx.cookie)
			*iter->xor_check_result =
				iop_desc_get_zero_result(iter);

		cookie = iop_adma_run_tx_complete_actions(
			iter, iop_chan, cookie);

		if (iop_adma_clean_slot(iter, iop_chan))
			break;
	}

	if (cookie > 0) {
		iop_chan->completed_cookie = cookie;
		pr_debug("\tcompleted cookie %d\n", cookie);
	}
}

static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
	spin_lock_bh(&iop_chan->lock);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock_bh(&iop_chan->lock);
}

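/* Bottom half: reclaim completed descriptors outside of hard-irq context. */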
static void iop_adma_tasklet(unsigned long data)
{
	struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

	/* lockdep will flag dependency submissions as potentially
	 * recursive locking, this is not the case as a dependency
	 * submission will never recurse into a channel's submit routine.
	 * There are checks in async_tx.c to prevent this.
	 */
	spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
	__iop_adma_slot_cleanup(iop_chan);
	spin_unlock(&iop_chan->lock);
}

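/* Search the slot pool for num_slots contiguous, aligned, free descriptor
 * slots.  The search starts at the last allocated slot and wraps around
 * once; if no run of free slots is found, completed descriptors are
 * reclaimed directly and NULL is returned.  Called with iop_chan->lock held.
 */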
static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
			int slots_per_op)
{
	struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = iop_chan->last_used;
	else
		iter = list_entry(&iop_chan->all_slots,
			struct iop_adma_desc_slot,
			slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++) {
			if (iop_desc_is_aligned(iter, slots_per_op))
				alloc_start = iter;
			else {
				slots_found = 0;
				continue;
			}
		}

		if (slots_found == num_slots) {
			struct iop_adma_desc_slot *alloc_tail = NULL;
			struct iop_adma_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;
				dev_dbg(iop_chan->device->common.dev,
					"allocated slot: %d "
					"(desc %p phys: %#x) slots_per_op %d\n",
					iter->idx, iter->hw_desc,
					iter->async_tx.phys, slots_per_op);

				/* pre-ack all but the last descriptor */
				if (num_slots != slots_per_op)
					async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct iop_adma_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			iop_chan->last_used = last_used;
			iop_desc_clear_next_desc(alloc_start);
			iop_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* perform direct reclaim if the allocation fails */
	__iop_adma_slot_cleanup(iop_chan);

	return NULL;
}

static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
	struct iop_adma_desc_slot *desc)
{
	dma_cookie_t cookie = iop_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	iop_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
	dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
		iop_chan->pending);

	if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

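/* Append a prepared descriptor group to the channel's software chain, link
 * it into the hardware descriptor list, and poke the engine once enough
 * operations are pending (see iop_adma_check_threshold).
 */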
static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
	struct iop_adma_desc_slot *grp_start, *old_chain_tail;
	int slot_cnt;
	int slots_per_op;
	dma_cookie_t cookie;
	dma_addr_t next_dma;

	grp_start = sw_desc->group_head;
	slot_cnt = grp_start->slot_cnt;
	slots_per_op = grp_start->slots_per_op;

	spin_lock_bh(&iop_chan->lock);
	cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

	old_chain_tail = list_entry(iop_chan->chain.prev,
		struct iop_adma_desc_slot, chain_node);
	list_splice_init(&sw_desc->async_tx.tx_list,
			 &old_chain_tail->chain_node);

	/* fix up the hardware chain */
	next_dma = grp_start->async_tx.phys;
	iop_desc_set_next_desc(old_chain_tail, next_dma);
	BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

	/* check for pre-chained descriptors */
	iop_paranoia(iop_desc_get_next_desc(sw_desc));

	/* increment the pending count by the number of slots
	 * memcpy operations have a 1:1 (slot:operation) relation
	 * other operations are heavier and will pop the threshold
	 * more often.
	 */
	iop_chan->pending += slot_cnt;
	iop_adma_check_threshold(iop_chan);
	spin_unlock_bh(&iop_chan->lock);

	dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
		__func__, sw_desc->async_tx.cookie, sw_desc->idx);

	return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *slot = NULL;
	int init = iop_chan->slots_allocated ? 0 : 1;
	struct iop_adma_platform_data *plat_data =
		iop_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

	/* Allocate descriptor slots */
	do {
		idx = iop_chan->slots_allocated;
		if (idx == num_descs_in_pool)
			break;

		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "IOP ADMA Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = iop_adma_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		hw_desc = (char *) iop_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
		slot->idx = idx;

		spin_lock_bh(&iop_chan->lock);
		iop_chan->slots_allocated++;
		list_add_tail(&slot->slot_node, &iop_chan->all_slots);
		spin_unlock_bh(&iop_chan->lock);
	} while (iop_chan->slots_allocated < num_descs_in_pool);

	if (idx && !iop_chan->last_used)
		iop_chan->last_used = list_entry(iop_chan->all_slots.next,
					struct iop_adma_desc_slot,
					slot_node);

	dev_dbg(iop_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		iop_chan->slots_allocated, iop_chan->last_used);

	/* initialize the channel and the chain with a null operation */
	if (init) {
		if (dma_has_cap(DMA_MEMCPY,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_memcpy(iop_chan);
		else if (dma_has_cap(DMA_XOR,
			iop_chan->device->common.cap_mask))
			iop_chan_start_null_xor(iop_chan);
		else
			BUG();
	}

	return (idx > 0) ? idx : -ENOMEM;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_interrupt(grp_start, iop_chan);
		grp_start->unmap_len = 0;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			 dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		iop_desc_set_memcpy_src_addr(grp_start, dma_src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
			 int value, size_t len, unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
		__func__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memset(grp_start, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_block_fill_val(grp_start, value);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
		      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
		      unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

	dev_dbg(iop_chan->device->common.dev,
		"%s src_cnt: %d len: %u flags: %lx\n",
		__func__, src_cnt, len, flags);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_xor(grp_start, src_cnt, flags);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_xor_src_addr(grp_start, src_cnt,
						  dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
			  unsigned int src_cnt, size_t len, u32 *result,
			  unsigned long flags)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;

	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
		__func__, src_cnt, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_zero_sum(grp_start, src_cnt, flags);
		iop_desc_set_zero_sum_byte_count(grp_start, len);
		grp_start->xor_check_result = result;
		pr_debug("\t%s: grp_start->xor_check_result: %p\n",
			__func__, grp_start->xor_check_result);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.flags = flags;
		while (src_cnt--)
			iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
						       dma_src[src_cnt]);
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	iop_adma_slot_cleanup(iop_chan);

	spin_lock_bh(&iop_chan->lock);
	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &iop_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		iop_chan->slots_allocated--;
	}
	iop_chan->last_used = NULL;

	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, iop_chan->slots_allocated);
	spin_unlock_bh(&iop_chan->lock);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
			in_use_descs - 1);
}

/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	iop_adma_slot_cleanup(iop_chan);

	last_used = chan->cookie;
	last_complete = iop_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eot_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;

	dev_dbg(chan->device->common.dev, "%s\n", __func__);

	tasklet_schedule(&chan->irq_tasklet);

	iop_adma_device_clear_eoc_status(chan);

	return IRQ_HANDLED;
}

static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
	struct iop_adma_chan *chan = data;
	unsigned long status = iop_chan_get_status(chan);

	dev_printk(KERN_ERR, chan->device->common.dev,
		"error ( %s%s%s%s%s%s%s)\n",
		iop_is_err_int_parity(status, chan) ? "int_parity " : "",
		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
		iop_is_err_split_tx(status, chan) ? "split_tx " : "");

	iop_adma_device_clear_err_status(chan);

	BUG();

	return IRQ_HANDLED;
}

static void iop_adma_issue_pending(struct dma_chan *chan)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

	if (iop_chan->pending) {
		iop_chan->pending = 0;
		iop_chan_append(iop_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000

static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	src_dma = dma_map_single(dma_chan->device->dev, src,
				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				      IOP_ADMA_TEST_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(1);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
			DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 zero_sum_result;
	int err = 0;
	struct iop_adma_chan *iop_chan;

	dev_dbg(device->common.dev, "%s\n", __func__);

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	zero_sum_result = 1;

	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset */
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	zero_sum_result = 0;
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
					   zero_sum_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
				       &zero_sum_result,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit iop_adma_remove(struct platform_device *dev)
{
	struct iop_adma_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct iop_adma_chan *iop_chan;
	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				device_node) {
		iop_chan = to_iop_adma_chan(chan);
		list_del(&chan->device_node);
		kfree(iop_chan);
	}
	kfree(device);

	return 0;
}

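/* Probe: map the controller registers, allocate the coherent descriptor
 * pool, advertise the capabilities listed in the platform data, wire up the
 * three interrupt lines, run the self tests, and register the channel with
 * the dmaengine core.
 */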
static int __devinit iop_adma_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret = 0, i;
	struct iop_adma_device *adev;
	struct iop_adma_chan *iop_chan;
	struct dma_device *dma_dev;
	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				res->end - res->start, pdev->name))
		return -EBUSY;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
					plat_data->pool_size,
					&adev->dma_desc_pool,
					GFP_KERNEL)) == NULL) {
		ret = -ENOMEM;
		goto err_free_adev;
	}

	dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
		__func__, adev->dma_desc_pool_virt,
		(void *) adev->dma_desc_pool);

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;

	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
	dma_dev->device_is_tx_complete = iop_adma_is_complete;
	dma_dev->device_issue_pending = iop_adma_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = iop_adma_get_max_xor();
		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
	}
	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
		dma_dev->device_prep_dma_xor_val =
			iop_adma_prep_dma_xor_val;
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt =
			iop_adma_prep_dma_interrupt;

	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
	if (!iop_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	iop_chan->device = adev;

	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
					res->end - res->start);
	if (!iop_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_iop_chan;
	}
	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
		iop_chan);

	/* clear errors before enabling interrupts */
	iop_adma_device_clear_err_status(iop_chan);

	for (i = 0; i < 3; i++) {
		irq_handler_t handler[] = { iop_adma_eot_handler,
					iop_adma_eoc_handler,
					iop_adma_err_handler };
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = -ENXIO;
			goto err_free_iop_chan;
		} else {
			ret = devm_request_irq(&pdev->dev, irq,
					handler[i], 0, pdev->name, iop_chan);
			if (ret)
				goto err_free_iop_chan;
		}
	}

	spin_lock_init(&iop_chan->lock);
	INIT_LIST_HEAD(&iop_chan->chain);
	INIT_LIST_HEAD(&iop_chan->all_slots);
	iop_chan->common.device = dma_dev;
	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = iop_adma_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		ret = iop_adma_xor_val_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_iop_chan;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
	  "( %s%s%s%s%s%s%s%s%s%s)\n",
	  dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
	  dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
	  dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
	  dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
	  dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

 err_free_iop_chan:
	kfree(iop_chan);
 err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
 err_free_adev:
	kfree(adev);
 out:
	return ret;
}

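/* Prime an idle channel with a zero-length memcpy descriptor so the chain
 * has a valid head to append real operations to, then (re)start the engine
 * on that descriptor.
 */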
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_memcpy(grp_start, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_memcpy_src_addr(grp_start, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
{
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		async_tx_ack(&sw_desc->async_tx);
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

MODULE_ALIAS("platform:iop-adma");

static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= __devexit_p(iop_adma_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",
	},
};

static int __init iop_adma_init (void)
{
	return platform_driver_register(&iop_adma_driver);
}

static void __exit iop_adma_exit (void)
{
	platform_driver_unregister(&iop_adma_driver);
	return;
}
module_exit(iop_adma_exit);
module_init(iop_adma_init);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");