/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");
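
/*
 * ioat_pending_level is a writable module parameter: it can be set at
 * load time (e.g. "modprobe ioatdma ioat_pending_level=8", assuming the
 * driver is built as the usual ioatdma module) or changed later through
 * /sys/module/ioatdma/parameters/ioat_pending_level.  The tx_submit()
 * paths below only kick the hardware once at least this many descriptors
 * are pending.
 */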

static void ioat_dma_chan_reset_part2(struct work_struct *work);
static void ioat_dma_chan_watchdog(struct work_struct *work);

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
        return device->idx[index];
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioat_dma_chan *ioat_chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
                ioat_chan = ioat_chan_by_index(instance, bit);
                tasklet_schedule(&ioat_chan->cleanup_task);
        }

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioat_dma_chan *ioat_chan = data;

        tasklet_schedule(&ioat_chan->cleanup_task);

        return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat_chan;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
#endif
        for (i = 0; i < dma->chancnt; i++) {
                ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan) {
                        dma->chancnt = i;
                        break;
                }

                ioat_chan->device = device;
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
                ioat_chan->desccount = 0;
                INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                /* This should be made common somewhere in dmaengine.c */
                ioat_chan->common.device = &device->common;
                list_add_tail(&ioat_chan->common.device_node, &dma->channels);
                device->idx[i] = ioat_chan;
                tasklet_init(&ioat_chan->cleanup_task,
                             ioat_dma_cleanup_tasklet,
                             (unsigned long) ioat_chan);
                tasklet_disable(&ioat_chan->cleanup_task);
        }
        return dma->chancnt;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
{
        ioat_chan->pending = 0;
        writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending > 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
}

static inline void
__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan)
{
        ioat_chan->pending = 0;
        writew(ioat_chan->dmacount,
               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending > 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
}
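
/*
 * Pushing work to the hardware differs by version: IOAT v1.2 is kicked
 * with an IOAT_CHANCMD_APPEND write to the channel command register,
 * while v2.0+ is kicked by writing the running descriptor count to the
 * per-channel DMACOUNT register.  Both paths clear ->pending under
 * desc_lock.
 */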


/**
 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
 */
static void ioat_dma_chan_reset_part2(struct work_struct *work)
{
        struct ioat_dma_chan *ioat_chan =
                container_of(work, struct ioat_dma_chan, work.work);
        struct ioat_desc_sw *desc;

        spin_lock_bh(&ioat_chan->cleanup_lock);
        spin_lock_bh(&ioat_chan->desc_lock);

        ioat_chan->completion_virt->low = 0;
        ioat_chan->completion_virt->high = 0;
        ioat_chan->pending = 0;

        /*
         * count the descriptors waiting, and be sure to do it
         * right for both the CB1 line and the CB2 ring
         */
        ioat_chan->dmacount = 0;
        if (ioat_chan->used_desc.prev) {
                desc = to_ioat_desc(ioat_chan->used_desc.prev);
                do {
                        ioat_chan->dmacount++;
                        desc = to_ioat_desc(desc->node.next);
                } while (&desc->node != ioat_chan->used_desc.next);
        }

        /*
         * write the new starting descriptor address
         * this puts channel engine into ARMED state
         */
        desc = to_ioat_desc(ioat_chan->used_desc.prev);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->txd.phys) >> 32,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

                writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
                break;
        case IOAT_VER_2_0:
                writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->txd.phys) >> 32,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

                /* tell the engine to go with what's left to be done */
                writew(ioat_chan->dmacount,
                       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);

                break;
        }
        dev_err(to_dev(ioat_chan),
                "chan%d reset - %d descs waiting, %d total desc\n",
                chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);

        spin_unlock_bh(&ioat_chan->desc_lock);
        spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_reset_channel - restart a channel
 * @ioat_chan: IOAT DMA channel handle
 */
static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
{
        u32 chansts, chanerr;

        if (!ioat_chan->used_desc.prev)
                return;

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        chansts = (ioat_chan->completion_virt->low
                                        & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
        if (chanerr) {
                dev_err(to_dev(ioat_chan),
                        "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
                        chan_num(ioat_chan), chansts, chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /*
         * whack it upside the head with a reset
         * and wait for things to settle out.
         * force the pending count to a really big negative
         * to make sure no one forces an issue_pending
         * while we're waiting.
         */

        spin_lock_bh(&ioat_chan->desc_lock);
        ioat_chan->pending = INT_MIN;
        writeb(IOAT_CHANCMD_RESET,
               ioat_chan->reg_base
               + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* schedule the 2nd half instead of sleeping a long time */
        schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
}

/**
 * ioat_dma_chan_watchdog - watch for stuck channels
 */
static void ioat_dma_chan_watchdog(struct work_struct *work)
{
        struct ioatdma_device *device =
                container_of(work, struct ioatdma_device, work.work);
        struct ioat_dma_chan *ioat_chan;
        int i;

        union {
                u64 full;
                struct {
                        u32 low;
                        u32 high;
                };
        } completion_hw;
        unsigned long compl_desc_addr_hw;

        for (i = 0; i < device->common.chancnt; i++) {
                ioat_chan = ioat_chan_by_index(device, i);

                if (ioat_chan->device->version == IOAT_VER_1_2
                        /* have we started processing anything yet */
                    && ioat_chan->last_completion
                        /* have we completed any since last watchdog cycle? */
                    && (ioat_chan->last_completion ==
                                ioat_chan->watchdog_completion)
                        /* has TCP stuck on one cookie since last watchdog? */
                    && (ioat_chan->watchdog_tcp_cookie ==
                                ioat_chan->watchdog_last_tcp_cookie)
                    && (ioat_chan->watchdog_tcp_cookie !=
                                ioat_chan->completed_cookie)
                        /* is there something in the chain to be processed? */
                        /* CB1 chain always has at least the last one processed */
                    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
                    && ioat_chan->pending == 0) {

                        /*
                         * check CHANSTS register for completed
                         * descriptor address.
                         * if it is different than completion writeback,
                         * it is not zero
                         * and it has changed since the last watchdog
                         *     we can assume that channel
                         *     is still working correctly
                         *     and the problem is in completion writeback.
                         *     update completion writeback
                         *     with actual CHANSTS value
                         * else
                         *     try resetting the channel
                         */

                        completion_hw.low = readl(ioat_chan->reg_base +
                                IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
                        completion_hw.high = readl(ioat_chan->reg_base +
                                IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
                        compl_desc_addr_hw =
                                completion_hw.full
                                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
                        compl_desc_addr_hw =
                                completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

                        if ((compl_desc_addr_hw != 0)
                            && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
                            && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
                                ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
                                ioat_chan->completion_virt->low = completion_hw.low;
                                ioat_chan->completion_virt->high = completion_hw.high;
                        } else {
                                ioat_dma_reset_channel(ioat_chan);
                                ioat_chan->watchdog_completion = 0;
                                ioat_chan->last_compl_desc_addr_hw = 0;
                        }

                /*
                 * for version 2.0 if there are descriptors yet to be processed
                 * and the last completed hasn't changed since the last watchdog
                 *     if they haven't hit the pending level
                 *         issue the pending to push them through
                 *     else
                 *         try resetting the channel
                 */
                } else if (ioat_chan->device->version == IOAT_VER_2_0
                    && ioat_chan->used_desc.prev
                    && ioat_chan->last_completion
                    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {

                        if (ioat_chan->pending < ioat_pending_level)
                                ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
                        else {
                                ioat_dma_reset_channel(ioat_chan);
                                ioat_chan->watchdog_completion = 0;
                        }
                } else {
                        ioat_chan->last_compl_desc_addr_hw = 0;
                        ioat_chan->watchdog_completion
                                        = ioat_chan->last_completion;
                }

                ioat_chan->watchdog_last_tcp_cookie =
                        ioat_chan->watchdog_tcp_cookie;
        }

        schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
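
/*
 * The watchdog is driven off a delayed workqueue every WATCHDOG_DELAY;
 * for a channel that appears stuck it either re-syncs the completion
 * writeback area from the CHANSTS register (v1.2), re-issues pending
 * descriptors (v2.0), or falls back to ioat_dma_reset_channel().
 */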

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
        struct ioat_desc_sw *prev, *new;
        struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
        LIST_HEAD(new_chain);
        u32 copy;
        size_t len;
        dma_addr_t src, dst;
        unsigned long orig_flags;
        unsigned int desc_count = 0;

        /* src and dest and len are stored in the initial descriptor */
        len = first->len;
        src = first->src;
        dst = first->dst;
        orig_flags = first->txd.flags;
        new = first;

        spin_lock_bh(&ioat_chan->desc_lock);
        prev = to_ioat_desc(ioat_chan->used_desc.prev);
        prefetch(prev->hw);
        do {
                copy = min_t(size_t, len, ioat_chan->xfercap);

                async_tx_ack(&new->txd);

                hw = new->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;
                hw->next = 0;

                /* chain together the physical address list for the HW */
                wmb();
                prev->hw->next = (u64) new->txd.phys;

                len -= copy;
                dst += copy;
                src += copy;

                list_add_tail(&new->node, &new_chain);
                desc_count++;
                prev = new;
        } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

        if (!new) {
                dev_err(to_dev(ioat_chan), "tx submit failed\n");
                spin_unlock_bh(&ioat_chan->desc_lock);
                return -ENOMEM;
        }

        hw->ctl_f.compl_write = 1;
        if (first->txd.callback) {
                hw->ctl_f.int_en = 1;
                if (first != new) {
                        /* move callback into the last desc */
                        new->txd.callback = first->txd.callback;
                        new->txd.callback_param
                                        = first->txd.callback_param;
                        first->txd.callback = NULL;
                        first->txd.callback_param = NULL;
                }
        }

        new->tx_cnt = desc_count;
        new->txd.flags = orig_flags; /* client is in control of this ack */

        /* store the original values for use in later cleanup */
        if (new != first) {
                new->src = first->src;
                new->dst = first->dst;
                new->len = first->len;
        }

        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->txd.cookie = cookie;

        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
                                                        first->txd.phys;
        list_splice_tail(&new_chain, &ioat_chan->used_desc);

        ioat_chan->dmacount += desc_count;
        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= ioat_pending_level)
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return cookie;
}

static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
        struct ioat_desc_sw *new;
        struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
        u32 copy;
        size_t len;
        dma_addr_t src, dst;
        unsigned long orig_flags;
        unsigned int desc_count = 0;

        /* src and dest and len are stored in the initial descriptor */
        len = first->len;
        src = first->src;
        dst = first->dst;
        orig_flags = first->txd.flags;
        new = first;

        /*
         * ioat_chan->desc_lock is still in force in version 2 path
         * it gets unlocked at end of this function
         */
        do {
                copy = min_t(size_t, len, ioat_chan->xfercap);

                async_tx_ack(&new->txd);

                hw = new->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;

                len -= copy;
                dst += copy;
                src += copy;
                desc_count++;
        } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

        if (!new) {
                dev_err(to_dev(ioat_chan), "tx submit failed\n");
                spin_unlock_bh(&ioat_chan->desc_lock);
                return -ENOMEM;
        }

        hw->ctl_f.compl_write = 1;
        if (first->txd.callback) {
                hw->ctl_f.int_en = 1;
                if (first != new) {
                        /* move callback into the last desc */
                        new->txd.callback = first->txd.callback;
                        new->txd.callback_param
                                        = first->txd.callback_param;
                        first->txd.callback = NULL;
                        first->txd.callback_param = NULL;
                }
        }

        new->tx_cnt = desc_count;
        new->txd.flags = orig_flags; /* client is in control of this ack */

        /* store the original values for use in later cleanup */
        if (new != first) {
                new->src = first->src;
                new->dst = first->dst;
                new->len = first->len;
        }

        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->txd.cookie = cookie;

        ioat_chan->dmacount += desc_count;
        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= ioat_pending_level)
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                desc_sw->txd.tx_submit = ioat1_tx_submit;
                break;
        case IOAT_VER_2_0:
        case IOAT_VER_3_0:
                desc_sw->txd.tx_submit = ioat2_tx_submit;
                break;
        }

        desc_sw->hw = desc;
        desc_sw->txd.phys = phys;

        return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
                 "initial descriptors per channel (default: 256)");
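
/*
 * Each channel pre-allocates ioat_initial_desc_count descriptors in
 * ioat_dma_alloc_chan_resources(); the v2/v3 ring can later grow in
 * small GFP_ATOMIC batches from ioat2_dma_get_next_descriptor() when it
 * runs dry.  Like ioat_pending_level, this is a module parameter
 * (e.g. "modprobe ioatdma ioat_initial_desc_count=512", assuming the
 * usual ioatdma module name).
 */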

/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc, *_desc;

        /* setup used_desc */
        ioat_chan->used_desc.next = ioat_chan->free_desc.next;
        ioat_chan->used_desc.prev = NULL;

        /* pull free_desc out of the circle so that every node is a hw
         * descriptor, but leave it pointing to the list
         */
        ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
        ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

        /* circle link the hw descriptors */
        desc = to_ioat_desc(ioat_chan->free_desc.next);
        desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
        list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
                desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys;
        }
}

/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
                return ioat_chan->desccount;

        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(to_dev(ioat_chan),
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        ioat_chan->desccount = i;
        list_splice(&tmp_list, &ioat_chan->free_desc);
        if (ioat_chan->device->version != IOAT_VER_1_2)
                ioat2_dma_massage_chan_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        tasklet_enable(&ioat_chan->cleanup_task);
        ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
        return ioat_chan->desccount;
}

/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */
        if (ioat_chan->desccount == 0)
                return;

        tasklet_disable(&ioat_chan->cleanup_task);
        ioat_dma_memcpy_cleanup(ioat_chan);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               ioat_chan->reg_base
               + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
        mdelay(100);

        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {
                        in_use_descs++;
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->txd.phys);
                        kfree(desc);
                }
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->free_desc, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->txd.phys);
                        kfree(desc);
                }
                break;
        case IOAT_VER_2_0:
        case IOAT_VER_3_0:
                list_for_each_entry_safe(desc, _desc,
                                         ioat_chan->free_desc.next, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->txd.phys);
                        kfree(desc);
                }
                desc = to_ioat_desc(ioat_chan->free_desc.next);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->txd.phys);
                kfree(desc);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      ioat_chan->completion_virt,
                      ioat_chan->completion_addr);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
        ioat_chan->pending = 0;
        ioat_chan->dmacount = 0;
        ioat_chan->desccount = 0;
        ioat_chan->watchdog_completion = 0;
        ioat_chan->last_compl_desc_addr_hw = 0;
        ioat_chan->watchdog_tcp_cookie =
                ioat_chan->watchdog_last_tcp_cookie = 0;
}

/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new;

        if (!list_empty(&ioat_chan->free_desc)) {
                new = to_ioat_desc(ioat_chan->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
                new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                if (!new) {
                        dev_err(to_dev(ioat_chan), "alloc failed\n");
                        return NULL;
                }
        }

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new;

        /*
         * used.prev points to where to start processing
         * used.next points to next free descriptor
         * if used.prev == NULL, there are none waiting to be processed
         * if used.next == used.prev.prev, there is only one free descriptor,
         *      and we need to use it as a noop descriptor before
         *      linking in a new set of descriptors, since the device
         *      has probably already read the pointer to it
         */
        if (ioat_chan->used_desc.prev &&
            ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

                struct ioat_desc_sw *desc;
                struct ioat_desc_sw *noop_desc;
                int i;

                /* set up the noop descriptor */
                noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
                /* set size to non-zero value (channel returns error when size is 0) */
                noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
                noop_desc->hw->ctl = 0;
                noop_desc->hw->ctl_f.null = 1;
                noop_desc->hw->src_addr = 0;
                noop_desc->hw->dst_addr = 0;

                ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
                ioat_chan->pending++;
                ioat_chan->dmacount++;

                /* try to get a few more descriptors */
                for (i = 16; i; i--) {
                        desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        if (!desc) {
                                dev_err(to_dev(ioat_chan), "alloc failed\n");
                                break;
                        }
                        list_add_tail(&desc->node, ioat_chan->used_desc.next);

                        desc->hw->next
                                = to_ioat_desc(desc->node.next)->txd.phys;
                        to_ioat_desc(desc->node.prev)->hw->next
                                = desc->txd.phys;
                        ioat_chan->desccount++;
                }

                ioat_chan->used_desc.next = noop_desc->node.next;
        }
        new = to_ioat_desc(ioat_chan->used_desc.next);
        prefetch(new);
        ioat_chan->used_desc.next = new->node.next;

        if (ioat_chan->used_desc.prev == NULL)
                ioat_chan->used_desc.prev = &new->node;

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *
ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        if (!ioat_chan)
                return NULL;

        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                return ioat1_dma_get_next_descriptor(ioat_chan);
        case IOAT_VER_2_0:
        case IOAT_VER_3_0:
                return ioat2_dma_get_next_descriptor(ioat_chan);
        }
        return NULL;
}

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                      dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat_dma_get_next_descriptor(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        if (new) {
                new->len = len;
                new->dst = dma_dest;
                new->src = dma_src;
                new->txd.flags = flags;
                return &new->txd;
        } else {
                dev_err(to_dev(ioat_chan),
                        "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
                        chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
                return NULL;
        }
}

static struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                      dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat2_dma_get_next_descriptor(ioat_chan);

        /*
         * leave ioat_chan->desc_lock set in ioat 2 path
         * it will get unlocked at end of tx_submit
         */

        if (new) {
                new->len = len;
                new->dst = dma_dest;
                new->src = dma_src;
                new->txd.flags = flags;
                return &new->txd;
        } else {
                spin_unlock_bh(&ioat_chan->desc_lock);
                dev_err(to_dev(ioat_chan),
                        "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
                        chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
                return NULL;
        }
}

static void ioat_dma_cleanup_tasklet(unsigned long data)
{
        struct ioat_dma_chan *chan = (void *)data;
        ioat_dma_memcpy_cleanup(chan);
        writew(IOAT_CHANCTRL_INT_DISABLE,
               chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
        if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                        pci_unmap_single(ioat_chan->device->pdev,
                                         pci_unmap_addr(desc, dst),
                                         pci_unmap_len(desc, len),
                                         PCI_DMA_FROMDEVICE);
                else
                        pci_unmap_page(ioat_chan->device->pdev,
                                       pci_unmap_addr(desc, dst),
                                       pci_unmap_len(desc, len),
                                       PCI_DMA_FROMDEVICE);
        }

        if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                        pci_unmap_single(ioat_chan->device->pdev,
                                         pci_unmap_addr(desc, src),
                                         pci_unmap_len(desc, len),
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(ioat_chan->device->pdev,
                                       pci_unmap_addr(desc, src),
                                       pci_unmap_len(desc, len),
                                       PCI_DMA_TODEVICE);
        }
}
1027
Shannon Nelson7bb67c12007-11-14 16:59:51 -08001028/**
1029 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
1030 * @chan: ioat channel to be cleaned up
1031 */
Shannon Nelson43d6e362007-10-16 01:27:39 -07001032static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
Chris Leech0bbd5f42006-05-23 17:35:34 -07001033{
1034 unsigned long phys_complete;
1035 struct ioat_desc_sw *desc, *_desc;
1036 dma_cookie_t cookie = 0;
Shannon Nelson7bb67c12007-11-14 16:59:51 -08001037 unsigned long desc_phys;
1038 struct ioat_desc_sw *latest_desc;
Dan Williamsbc3c7022009-07-28 14:33:42 -07001039 struct dma_async_tx_descriptor *tx;
Chris Leech0bbd5f42006-05-23 17:35:34 -07001040
Shannon Nelson43d6e362007-10-16 01:27:39 -07001041 prefetch(ioat_chan->completion_virt);
Chris Leech0bbd5f42006-05-23 17:35:34 -07001042
Shannon Nelson7f2b2912007-10-18 03:07:14 -07001043 if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
Chris Leech0bbd5f42006-05-23 17:35:34 -07001044 return;
1045
        /* The completion writeback can happen at any time,
           so reads by the driver need to be atomic operations.
           The descriptor physical addresses are limited to 32 bits
           when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
        phys_complete =
                ioat_chan->completion_virt->full
                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
        phys_complete =
                ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

        if ((ioat_chan->completion_virt->full
                & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                                IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

                /* TODO do something to salvage the situation */
        }

        if (phys_complete == ioat_chan->last_completion) {
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                /*
                 * perhaps we're stuck so hard that the watchdog can't go off?
                 * try to catch it after 2 seconds
                 */
                if (ioat_chan->device->version != IOAT_VER_3_0) {
                        if (time_after(jiffies,
                                       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
                                ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
                                ioat_chan->last_completion_time = jiffies;
                        }
                }
                return;
        }
        ioat_chan->last_completion_time = jiffies;

        cookie = 0;
        if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                return;
        }

        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {
                        tx = &desc->txd;
                        /*
                         * Incoming DMA requests may use multiple descriptors,
                         * due to exceeding xfercap, perhaps. If so, only the
                         * last one will have a cookie, and require unmapping.
                         */
                        if (tx->cookie) {
                                cookie = tx->cookie;
                                ioat_dma_unmap(ioat_chan, desc);
                                if (tx->callback) {
                                        tx->callback(tx->callback_param);
                                        tx->callback = NULL;
                                }
                        }

                        if (tx->phys != phys_complete) {
                                /*
                                 * a completed entry, but not the last, so clean
                                 * up if the client is done with the descriptor
                                 */
                                if (async_tx_test_ack(tx)) {
                                        list_move_tail(&desc->node,
                                                       &ioat_chan->free_desc);
                                } else
                                        tx->cookie = 0;
                        } else {
                                /*
                                 * last used desc. Do not remove, so we can
                                 * append from it, but don't look at it next
                                 * time, either
                                 */
                                tx->cookie = 0;

                                /* TODO check status bits? */
                                break;
                        }
                }
                break;
        case IOAT_VER_2_0:
        case IOAT_VER_3_0:
                /* has some other thread already cleaned up? */
                if (ioat_chan->used_desc.prev == NULL)
                        break;

                /* work backwards to find latest finished desc */
                desc = to_ioat_desc(ioat_chan->used_desc.next);
                latest_desc = NULL;
                do {
                        desc = to_ioat_desc(desc->node.prev);
                        desc_phys = (unsigned long)desc->txd.phys
                                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
                        if (desc_phys == phys_complete) {
                                latest_desc = desc;
                                break;
                        }
                } while (&desc->node != ioat_chan->used_desc.prev);

                if (latest_desc != NULL) {
                        /* work forwards to clear finished descriptors */
                        for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
                             &desc->node != latest_desc->node.next &&
                             &desc->node != ioat_chan->used_desc.next;
                             desc = to_ioat_desc(desc->node.next)) {
                                tx = &desc->txd;
                                if (tx->cookie) {
                                        cookie = tx->cookie;
                                        tx->cookie = 0;
                                        ioat_dma_unmap(ioat_chan, desc);
                                        if (tx->callback) {
                                                tx->callback(tx->callback_param);
                                                tx->callback = NULL;
                                        }
                                }
                        }

                        /* move used.prev up beyond those that are finished */
                        if (&desc->node == ioat_chan->used_desc.next)
                                ioat_chan->used_desc.prev = NULL;
                        else
                                ioat_chan->used_desc.prev = &desc->node;
                }
                break;
        }

        spin_unlock_bh(&ioat_chan->desc_lock);

        ioat_chan->last_completion = phys_complete;
        if (cookie != 0)
                ioat_chan->completed_cookie = cookie;

        spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status
ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie,
                     dma_cookie_t *done, dma_cookie_t *used)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;
        ioat_chan->watchdog_tcp_cookie = cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat_dma_memcpy_cleanup(ioat_chan);

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc;
        struct ioat_dma_descriptor *hw;

        spin_lock_bh(&ioat_chan->desc_lock);

        desc = ioat_dma_get_next_descriptor(ioat_chan);

        if (!desc) {
                dev_err(to_dev(ioat_chan),
                        "Unable to start null desc - get next desc failed\n");
                spin_unlock_bh(&ioat_chan->desc_lock);
                return;
        }

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;
        async_tx_ack(&desc->txd);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                hw->next = 0;
                list_add_tail(&desc->node, &ioat_chan->used_desc);

                writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->txd.phys) >> 32,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

                writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
                break;
        case IOAT_VER_2_0:
        case IOAT_VER_3_0:
                writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->txd.phys) >> 32,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

                ioat_chan->dmacount++;
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

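/**
 * ioat_dma_test_callback - completion callback for the self-test copy
 * @dma_async_param: completion that the self-test waits on
 */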
static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - perform an IOAT transaction to verify the HW works
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
1389 "msix-single-vector, msi, intx)");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		ioat_chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				ioat_chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

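/**
 * ioat_disable_interrupts - mask all interrupt generation on the device
 * @device: ioat device
 */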
static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

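/**
 * ioat_probe - version-independent device initialization
 * @device: ioat device
 *
 * Creates the descriptor and completion DMA pools, enumerates the
 * channels, fills in the common dmaengine operations, sets up the
 * interrupt handler, and runs a memcpy self-test before registration.
 */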
static int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat_dma_free_chan_resources;
	dma->device_is_tx_complete = ioat_dma_is_complete;
	dma->dev = &pdev->dev;

	dev_err(dev, "Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		dma->chancnt, device->version, IOAT_DMA_VERSION);

	if (!dma->chancnt) {
		dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: "
			"zero channels detected\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

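/**
 * ioat_register - register with the dmaengine core, unwinding on failure
 * @device: ioat device
 */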
static int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

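/**
 * ioat1_dma_probe - initialize a version 1.2 (first generation) device
 * @device: ioat device
 * @dca: whether to also set up direct cache access
 *
 * Hooks up the version 1 memcpy operations, runs the common probe and
 * registration, and starts the channel watchdog.
 */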
int ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}

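/**
 * ioat2_dma_probe - initialize a version 2 device
 * @device: ioat device
 * @dca: whether to also set up direct cache access
 *
 * Same flow as version 1, but with the version 2 descriptor operations,
 * and with completion writes plus any-CPU DCA hints enabled on every
 * channel before registration.
 */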
int ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	struct ioat_dma_chan *ioat_chan;
	int err;

	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
	dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(chan, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
	schedule_delayed_work(&device->work, WATCHDOG_DELAY);

	return err;
}

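/**
 * ioat3_dma_probe - initialize a version 3 device
 * @device: ioat device
 * @dca: whether to also set up direct cache access
 *
 * Applies the version 3 errata workarounds (channel error interrupt mask,
 * spurious config-space parity status) before the common probe, programs
 * each channel's DCA control for any-CPU hints, and skips the watchdog
 * used on earlier hardware.
 */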
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	struct ioat_dma_chan *ioat_chan;
	int err;
	u16 dev_id;

	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
	dma->device_issue_pending = ioat2_dma_memcpy_issue_pending;

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(chan, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;
	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);

	return err;
}

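/**
 * ioat_dma_remove - tear down an ioat device
 * @device: ioat device
 *
 * Stops the watchdog (not used on version 3 hardware), disables
 * interrupts, unregisters from dmaengine, and releases the descriptor
 * and completion pools.
 */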
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;
	struct dma_device *dma = &device->common;

	if (device->version != IOAT_VER_3_0)
		cancel_delayed_work(&device->work);

	ioat_disable_interrupts(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
	}
}
