/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static char *chanerr_str[] = {
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
	NULL
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

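/* Log a human-readable message for every bit set in a CHANERR value. */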
static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < 32; i++) {
		if ((chanerr >> i) & 1) {
			if (chanerr_str[i]) {
				dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
					i, chanerr_str[i]);
			} else
				break;
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

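/**
 * ioat_stop - quiesce all channel activity
 * @ioat_chan: channel to stop
 *
 * Clears IOAT_RUN, synchronizes any in-flight interrupt, flushes the
 * channel timer and cleanup tasklet, then runs one final cleanup pass.
 */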
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}

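/* Publish newly appended descriptors to the hardware by updating the
 * channel's DMA count register; callers hold prep_lock.
 */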
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held.
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

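/* Queue a NULL descriptor to (re)start the hardware descriptor chain
 * when there is no real work to point it at; called under prep_lock.
 */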
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

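/* Re-seed the hardware chain address from the current tail and reissue
 * any still-pending descriptors, or fall back to a NULL descriptor if
 * the ring is empty.
 */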
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

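/* Suspend the channel and wait for it to leave the active/idle states.
 * A @tmo of 0 waits indefinitely; otherwise -ETIMEDOUT is returned when
 * the timeout expires.
 */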
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

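/* Issue a channel reset and poll until the reset completes, returning
 * -ETIMEDOUT if it is still pending after @tmo jiffies.
 */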
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

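/* Assign a cookie and publish the descriptors produced since ring space
 * was reserved, then drop the prep_lock taken by ioat_check_space_lock().
 */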
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

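/* Allocate one software ring entry; the hardware descriptor itself is
 * carved out of the channel's preallocated 2MB descriptor chunks, only
 * the bookkeeping struct comes from the ioat_cache slab.
 */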
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	ioat_dma = to_ioatdma_device(chan->device);

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

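/* Allocate a descriptor ring of 1 << order entries: the 2MB coherent
 * chunks backing the hardware descriptors, the software ring array, and
 * the circular hardware chain linking them together.
 */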
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs - 1; i++) {
		struct ioat_ring_ent *next = ring[i + 1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case; we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}

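/* XOR descriptors with more than 5 sources and PQ descriptors with more
 * than 3 sources spill into a second (extended) descriptor slot.
 */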
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

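/* Read the physical address of the most recently completed descriptor
 * from the channel's completion writeback area.
 */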
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

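/* Returns true (and re-arms the completion timer) only if the hardware
 * has advanced past the last recorded completion address.
 */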
static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

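/* For PQ validate operations, fold descriptor write-back error status
 * (DWBES) P/Q failures into the operation's result field.
 */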
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's an error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: completed descriptor address reported by the hardware
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's a new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_NOERROR;
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}

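/* Reap completed descriptors and, if the channel halted on an error we
 * know how to handle or recover from, kick off error handling.
 */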
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

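/* Cleanup tasklet body: reap descriptors and re-enable channel
 * interrupts unless the channel is being torn down.
 */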
void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

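/* Quiesce the channel, reap anything that completed before the halt,
 * and restart the hardware from the current tail.
 */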
static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

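/* Complete every outstanding descriptor beyond the failed one at the
 * tail with DMA_TRANS_ABORTED, then advance the tail and completion
 * address past the aborted work.
 */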
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

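/* Channel error handler: decode CHANERR, record P/Q validation results,
 * complete the faulting descriptor, abort and reset the channel on
 * unrecoverable data errors, then restart the ring. Errors that cannot
 * be handled are fatal (BUG).
 */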
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need to abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

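/* Keep the completion timer armed while descriptors are outstanding;
 * otherwise drop back to the idle timeout. Called under prep_lock.
 */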
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

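/**
 * ioat_timer_event - watchdog for halted or stalled channels
 * @data: channel, cast from unsigned long
 *
 * Aborts and resets a halted channel, reaps completed descriptors, and
 * escalates to a full abort/reset/restart when no progress has been made
 * since the last acknowledged completion.
 */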
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

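/* Completion status query: if the cookie has not completed yet, run one
 * cleanup pass and check again.
 */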
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

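/**
 * ioat_reset_hw - discard channel state and reinitialize the hardware
 * @ioat_chan: channel to reset
 *
 * Quiesces the channel, clears channel and PCI error state (with
 * chipset-specific workarounds), saves and restores the MSI-X registers
 * across the reset on devices where is_bwd_ioat() is true, and performs
 * a synchronous channel reset.
 */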
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					IOAT_PCI_DMAUNCERRSTS_OFFSET,
					0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}