/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support routines for v3+ hardware
 */

#include <linux/pci.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)

/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc __read_mostly = 0xd0;
static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc __read_mostly = 0xf8;
static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };

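/* encoding used by the accessors below: bit i of {xor,pq}_idx_to_desc selects
 * whether source i lives in the base descriptor (0) or the extended
 * descriptor (1), and {xor,pq}_idx_to_field[i] gives the u64 slot to use
 * within the selected descriptor
 */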
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}

static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}

static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}

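/* the source coefficients always live in the base pq descriptor, which is
 * why descs[0] is unconditionally cast to a pq descriptor here
 */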
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}

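/* the addresses stored in the final descriptor of a multi-descriptor
 * operation point at the last transfer chunk, so 'offset' (total length minus
 * the last chunk's size) rewinds them to the start of the mapping before the
 * full length is unmapped
 */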
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_PQ_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
	}
}

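/* a raid operation that needed an extended descriptor occupies two ring
 * slots; __cleanup() uses this test to know when to skip the extra slot
 */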
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			ioat3_dma_unmap(ioat, desc, ioat->tail + i);
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	ioat->tail += i;
	BUG_ON(!seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

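/* cleanup from the tasklet is best-effort: if either lock is contended the
 * routine simply bails out and lets a later completion interrupt or timer
 * tick retry
 */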
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

static void ioat3_cleanup_tasklet(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;

	ioat3_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

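/* quiesce the channel (issue a suspend and spin until the hardware no longer
 * reports active/idle), reclaim whatever did complete, then restart from the
 * current tail
 */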
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;
	u32 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		status = ioat_chansts(chan);
		cpu_relax();
	}

	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

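/* the channel timer plays two roles: while completions are pending it acts
 * as a watchdog that escalates to a channel restart, and when the channel is
 * idle it opportunistically steps an oversized ring back down
 */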
static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = (void *) data;
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat3_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order - 1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

static enum dma_status
ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
		  dma_cookie_t *done, dma_cookie_t *used)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat3_cleanup(ioat);

	return ioat_is_complete(c, cookie, done, used);
}

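/* the hardware fill operation writes a repeating u64 pattern, so the single
 * fill byte is replicated into all eight bytes of src_data below
 */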
static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	int num_descs;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i++) {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	}

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

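/* ring layout for an xor operation: one xor descriptor per transfer chunk,
 * each paired with an extended descriptor when more than five sources are in
 * play, plus a trailing null descriptor that carries the completion write
 */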
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	int num_descs;
	int with_ext;
	int i;
	u16 idx;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs + 1) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i += 1 + with_ext) {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; xor_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	}

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}

static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc,
		 struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}

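/* as with xor, each pq transfer chunk takes a base descriptor plus an
 * extended descriptor when more than three explicit sources (or a raid6
 * continuation) are involved, followed by a null completion descriptor
 */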
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	int num_descs;
	int with_ext;
	int i, s;
	u16 idx;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources
	 */
	if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs + 1) == 0)
		/* pass */;
	else
		return NULL;
	for (i = 0; i < num_descs; i += 1 + with_ext) {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor; pq_set_src() knows not to write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	}

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}

static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}

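/* xor can be emulated on the raid engine: P is the plain parity (xor) of the
 * sources, so Q is disabled, the coefficients are all zero don't-cares, and
 * an all-ones dummy address stands in for the unused Q destination
 */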
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[0] = dst;
	pq[1] = ~0;

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[0] = src[0];
	pq[1] = ~0;

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}

static void __devinit ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
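/* exercise the raid capabilities at probe time: xor IOAT_NUM_SRC_TEST pages
 * into a zeroed destination, validate the result, memset the destination
 * back to zero, then re-validate expecting a P parity mismatch
 */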
static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		   (cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;
	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;

	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	dma_addr = dma_map_page(dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum */
	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}

int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;
	u16 dev_id;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->cleanup_tasklet = ioat3_cleanup_tasklet;
	device->timer_fn = ioat3_timer_event;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat3_is_complete;
	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
	if (cap & IOAT_CAP_FILL_BLOCK) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}
	if (cap & IOAT_CAP_XOR) {
		dma->max_xor = 8;
		dma->xor_align = 2;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}
	if (cap & IOAT_CAP_PQ) {
		dma_set_maxpq(dma, 8, 0);
		dma->pq_align = 2;

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

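		/* when the hardware lacks a dedicated xor unit, advertise xor
		 * anyway and service it through the pq engine (see
		 * ioat3_prep_pqxor() and ioat3_prep_pqxor_val() above)
		 */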
		if (!(cap & IOAT_CAP_XOR)) {
			dma->max_xor = 8;
			dma->xor_align = 2;

			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return 0;
}