/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
                                                struct ioatdma_device *device,
                                                int index)
{
        return device->idx[index];
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioat_dma_chan *ioat_chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
                ioat_chan = ioat_lookup_chan_by_index(instance, bit);
                tasklet_schedule(&ioat_chan->cleanup_task);
        }

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioat_dma_chan *ioat_chan = data;

        tasklet_schedule(&ioat_chan->cleanup_task);

        return IRQ_HANDLED;
}

112static void ioat_dma_cleanup_tasklet(unsigned long data);
113
114/**
115 * ioat_dma_enumerate_channels - find and initialize the device's channels
116 * @device: the device to be enumerated
117 */
Shannon Nelson8ab89562007-10-16 01:27:39 -0700118static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
Chris Leech0bbd5f42006-05-23 17:35:34 -0700119{
120 u8 xfercap_scale;
121 u32 xfercap;
122 int i;
123 struct ioat_dma_chan *ioat_chan;
124
Chris Leeche3828812007-03-08 09:57:35 -0800125 device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126 xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
Chris Leech0bbd5f42006-05-23 17:35:34 -0700127 xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
128
129 for (i = 0; i < device->common.chancnt; i++) {
130 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
131 if (!ioat_chan) {
132 device->common.chancnt = i;
133 break;
134 }
135
136 ioat_chan->device = device;
137 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138 ioat_chan->xfercap = xfercap;
Shannon Nelson7bb67c12007-11-14 16:59:51 -0800139 ioat_chan->desccount = 0;
140 if (ioat_chan->device->version != IOAT_VER_1_2) {
141 writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142 | IOAT_DMA_DCA_ANY_CPU,
143 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
144 }
Chris Leech0bbd5f42006-05-23 17:35:34 -0700145 spin_lock_init(&ioat_chan->cleanup_lock);
146 spin_lock_init(&ioat_chan->desc_lock);
147 INIT_LIST_HEAD(&ioat_chan->free_desc);
148 INIT_LIST_HEAD(&ioat_chan->used_desc);
149 /* This should be made common somewhere in dmaengine.c */
150 ioat_chan->common.device = &device->common;
Chris Leech0bbd5f42006-05-23 17:35:34 -0700151 list_add_tail(&ioat_chan->common.device_node,
Shannon Nelson43d6e362007-10-16 01:27:39 -0700152 &device->common.channels);
Shannon Nelson3e037452007-10-16 01:27:40 -0700153 device->idx[i] = ioat_chan;
154 tasklet_init(&ioat_chan->cleanup_task,
155 ioat_dma_cleanup_tasklet,
156 (unsigned long) ioat_chan);
157 tasklet_disable(&ioat_chan->cleanup_task);
Chris Leech0bbd5f42006-05-23 17:35:34 -0700158 }
159 return device->common.chancnt;
160}
161
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
                                                struct ioat_dma_chan *ioat_chan)
{
        ioat_chan->pending = 0;
        writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
}

static inline void __ioat2_dma_memcpy_issue_pending(
                                                struct ioat_dma_chan *ioat_chan)
{
        ioat_chan->pending = 0;
        writew(ioat_chan->dmacount,
               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
}

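/**
 * ioat1_tx_submit - splice a prepared descriptor chain onto the channel
 * @tx: descriptor returned earlier by ioat1_dma_prep_memcpy
 *
 * Carves the transfer into hardware descriptors of at most xfercap bytes
 * each, links them onto used_desc, assigns the cookie, and appends to the
 * hardware chain once pending crosses ioat_pending_level.
 */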
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
        struct ioat_desc_sw *prev, *new;
        struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
        LIST_HEAD(new_chain);
        u32 copy;
        size_t len;
        dma_addr_t src, dst;
        unsigned long orig_flags;
        unsigned int desc_count = 0;

        /* src and dest and len are stored in the initial descriptor */
        len = first->len;
        src = first->src;
        dst = first->dst;
        orig_flags = first->async_tx.flags;
        new = first;

        spin_lock_bh(&ioat_chan->desc_lock);
        prev = to_ioat_desc(ioat_chan->used_desc.prev);
        prefetch(prev->hw);
        do {
                copy = min_t(size_t, len, ioat_chan->xfercap);

                async_tx_ack(&new->async_tx);

                hw = new->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;
                hw->next = 0;

                /* chain together the physical address list for the HW */
                wmb();
                prev->hw->next = (u64) new->async_tx.phys;

                len -= copy;
                dst += copy;
                src += copy;

                list_add_tail(&new->node, &new_chain);
                desc_count++;
                prev = new;
        } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

        hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        if (new->async_tx.callback) {
                hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
                if (first != new) {
                        /* move callback to the last desc */
                        new->async_tx.callback = first->async_tx.callback;
                        new->async_tx.callback_param
                                        = first->async_tx.callback_param;
                        first->async_tx.callback = NULL;
                        first->async_tx.callback_param = NULL;
                }
        }

        new->tx_cnt = desc_count;
        new->async_tx.flags = orig_flags; /* client is in control of this ack */

        /* store the original values for use in later cleanup */
        if (new != first) {
                new->src = first->src;
                new->dst = first->dst;
                new->len = first->len;
        }

        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->async_tx.cookie = cookie;

        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
                                                        first->async_tx.phys;
        __list_splice(&new_chain, ioat_chan->used_desc.prev);

        ioat_chan->dmacount += desc_count;
        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= ioat_pending_level)
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return cookie;
}

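/**
 * ioat2_tx_submit - fill in the descriptor ring for a prepared transfer
 * @tx: descriptor returned earlier by ioat2_dma_prep_memcpy
 *
 * Same carving as the version 1 path, but the descriptors already sit in
 * the channel's circular ring; desc_lock was taken in the prep routine
 * and is released at the end of this function.
 */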
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
        struct ioat_desc_sw *new;
        struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
        u32 copy;
        size_t len;
        dma_addr_t src, dst;
        unsigned long orig_flags;
        unsigned int desc_count = 0;

        /* src and dest and len are stored in the initial descriptor */
        len = first->len;
        src = first->src;
        dst = first->dst;
        orig_flags = first->async_tx.flags;
        new = first;

        /*
         * ioat_chan->desc_lock is still in force in version 2 path
         * it gets unlocked at end of this function
         */
        do {
                copy = min_t(size_t, len, ioat_chan->xfercap);

                async_tx_ack(&new->async_tx);

                hw = new->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;

                len -= copy;
                dst += copy;
                src += copy;
                desc_count++;
        } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

        hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        if (new->async_tx.callback) {
                hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
                if (first != new) {
                        /* move callback to the last desc */
                        new->async_tx.callback = first->async_tx.callback;
                        new->async_tx.callback_param
                                        = first->async_tx.callback_param;
                        first->async_tx.callback = NULL;
                        first->async_tx.callback_param = NULL;
                }
        }

        new->tx_cnt = desc_count;
        new->async_tx.flags = orig_flags; /* client is in control of this ack */

        /* store the original values for use in later cleanup */
        if (new != first) {
                new->src = first->src;
                new->dst = first->dst;
                new->len = first->len;
        }

        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->async_tx.cookie = cookie;

        ioat_chan->dmacount += desc_count;
        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= ioat_pending_level)
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
                                        struct ioat_dma_chan *ioat_chan,
                                        gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                desc_sw->async_tx.tx_submit = ioat1_tx_submit;
                break;
        case IOAT_VER_2_0:
                desc_sw->async_tx.tx_submit = ioat2_tx_submit;
                break;
        }
        INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

        desc_sw->hw = desc;
        desc_sw->async_tx.phys = phys;

        return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
                 "initial descriptors per channel (default: 256)");

/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc, *_desc;

        /* setup used_desc */
        ioat_chan->used_desc.next = ioat_chan->free_desc.next;
        ioat_chan->used_desc.prev = NULL;

        /* pull free_desc out of the circle so that every node is a hw
         * descriptor, but leave it pointing to the list
         */
        ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
        ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

        /* circle link the hw descriptors */
        desc = to_ioat_desc(ioat_chan->free_desc.next);
        desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
        list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
                desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
        }
}

/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
                return ioat_chan->desccount;

        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        ioat_chan->desccount = i;
        list_splice(&tmp_list, &ioat_chan->free_desc);
        if (ioat_chan->device->version != IOAT_VER_1_2)
                ioat2_dma_massage_chan_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        tasklet_enable(&ioat_chan->cleanup_task);
        ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
        return ioat_chan->desccount;
}

/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        tasklet_disable(&ioat_chan->cleanup_task);
        ioat_dma_memcpy_cleanup(ioat_chan);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
        mdelay(100);

        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {
                        in_use_descs++;
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->free_desc, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                break;
        case IOAT_VER_2_0:
                list_for_each_entry_safe(desc, _desc,
                                         ioat_chan->free_desc.next, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                desc = to_ioat_desc(ioat_chan->free_desc.next);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->async_tx.phys);
                kfree(desc);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      ioat_chan->completion_virt,
                      ioat_chan->completion_addr);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(&ioat_chan->device->pdev->dev,
                        "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
        ioat_chan->pending = 0;
        ioat_chan->dmacount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new;

        if (!list_empty(&ioat_chan->free_desc)) {
                new = to_ioat_desc(ioat_chan->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
                new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                if (!new) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "alloc failed\n");
                        return NULL;
                }
        }

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new;

        /*
         * used.prev points to where to start processing
         * used.next points to next free descriptor
         * if used.prev == NULL, there are none waiting to be processed
         * if used.next == used.prev.prev, there is only one free descriptor,
         *      and we need to use it as a noop descriptor before
         *      linking in a new set of descriptors, since the device
         *      has probably already read the pointer to it
         */
        if (ioat_chan->used_desc.prev &&
            ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

                struct ioat_desc_sw *desc;
                struct ioat_desc_sw *noop_desc;
                int i;

                /* set up the noop descriptor */
                noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
                noop_desc->hw->size = 0;
                noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
                noop_desc->hw->src_addr = 0;
                noop_desc->hw->dst_addr = 0;

                ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
                ioat_chan->pending++;
                ioat_chan->dmacount++;

                /* try to get a few more descriptors */
                for (i = 16; i; i--) {
                        desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        if (!desc) {
                                dev_err(&ioat_chan->device->pdev->dev,
                                        "alloc failed\n");
                                break;
                        }
                        list_add_tail(&desc->node, ioat_chan->used_desc.next);

                        desc->hw->next
                                = to_ioat_desc(desc->node.next)->async_tx.phys;
                        to_ioat_desc(desc->node.prev)->hw->next
                                = desc->async_tx.phys;
                        ioat_chan->desccount++;
                }

                ioat_chan->used_desc.next = noop_desc->node.next;
        }
        new = to_ioat_desc(ioat_chan->used_desc.next);
        prefetch(new);
        ioat_chan->used_desc.next = new->node.next;

        if (ioat_chan->used_desc.prev == NULL)
                ioat_chan->used_desc.prev = &new->node;

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
                                        struct ioat_dma_chan *ioat_chan)
{
        if (!ioat_chan)
                return NULL;

        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                return ioat1_dma_get_next_descriptor(ioat_chan);
        case IOAT_VER_2_0:
                return ioat2_dma_get_next_descriptor(ioat_chan);
        }
        return NULL;
}

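/**
 * ioat1_dma_prep_memcpy - reserve a descriptor and record memcpy parameters
 * @chan: DMA channel handle
 * @dma_dest: mapped destination address
 * @dma_src: mapped source address
 * @len: transfer length in bytes
 * @flags: async_tx flags
 *
 * Only the first descriptor is reserved here; the rest of the hardware
 * chain is built at tx_submit time.
 */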
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                dma_addr_t dma_dest,
                                                dma_addr_t dma_src,
                                                size_t len,
                                                unsigned long flags)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat_dma_get_next_descriptor(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        if (new) {
                new->len = len;
                new->dst = dma_dest;
                new->src = dma_src;
                new->async_tx.flags = flags;
                return &new->async_tx;
        } else
                return NULL;
}

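/* version 2 variant: note that on success this returns with desc_lock
 * still held; ioat2_tx_submit() releases it after filling the ring.
 */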
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                dma_addr_t dma_dest,
                                                dma_addr_t dma_src,
                                                size_t len,
                                                unsigned long flags)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat2_dma_get_next_descriptor(ioat_chan);

        /*
         * leave ioat_chan->desc_lock set in ioat 2 path
         * it will get unlocked at end of tx_submit
         */

        if (new) {
                new->len = len;
                new->dst = dma_dest;
                new->src = dma_src;
                new->async_tx.flags = flags;
                return &new->async_tx;
        } else {
                /* no descriptor: tx_submit will never run, so the lock
                 * must be dropped here to avoid deadlocking the channel
                 */
                spin_unlock_bh(&ioat_chan->desc_lock);
                return NULL;
        }
}

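/*
 * Tasklet body, scheduled by the interrupt handlers: reaps finished
 * descriptors outside of hard-irq context.
 */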
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
        struct ioat_dma_chan *chan = (void *)data;
        ioat_dma_memcpy_cleanup(chan);
        writew(IOAT_CHANCTRL_INT_DISABLE,
               chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

/**
 * ioat_dma_memcpy_cleanup - cleanup up finished descriptors
 * @chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
        unsigned long phys_complete;
        struct ioat_desc_sw *desc, *_desc;
        dma_cookie_t cookie = 0;
        unsigned long desc_phys;
        struct ioat_desc_sw *latest_desc;

        prefetch(ioat_chan->completion_virt);

        if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
                return;

        /* The completion writeback can happen at any time,
         * so reads by the driver need to be atomic operations.
         * The descriptor physical addresses are limited to 32-bits
         * when the CPU can only do a 32-bit mov.
         */

#if (BITS_PER_LONG == 64)
        phys_complete =
                ioat_chan->completion_virt->full
                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
        phys_complete =
                ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

        if ((ioat_chan->completion_virt->full
                & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                        IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "Channel halted, chanerr = %x\n",
                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

                /* TODO do something to salvage the situation */
        }

        if (phys_complete == ioat_chan->last_completion) {
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                return;
        }

        cookie = 0;
        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {

                        /*
                         * Incoming DMA requests may use multiple descriptors,
                         * due to exceeding xfercap, perhaps. If so, only the
                         * last one will have a cookie, and require unmapping.
                         */
                        if (desc->async_tx.cookie) {
                                cookie = desc->async_tx.cookie;

                                /*
                                 * yes we are unmapping both _page and _single
                                 * alloc'd regions with unmap_page. Is this
                                 * *really* that bad?
                                 */
                                pci_unmap_page(ioat_chan->device->pdev,
                                                pci_unmap_addr(desc, dst),
                                                pci_unmap_len(desc, len),
                                                PCI_DMA_FROMDEVICE);
                                pci_unmap_page(ioat_chan->device->pdev,
                                                pci_unmap_addr(desc, src),
                                                pci_unmap_len(desc, len),
                                                PCI_DMA_TODEVICE);

                                if (desc->async_tx.callback) {
                                        desc->async_tx.callback(desc->async_tx.callback_param);
                                        desc->async_tx.callback = NULL;
                                }
                        }

                        if (desc->async_tx.phys != phys_complete) {
                                /*
                                 * a completed entry, but not the last, so clean
                                 * up if the client is done with the descriptor
                                 */
                                if (async_tx_test_ack(&desc->async_tx)) {
                                        list_del(&desc->node);
                                        list_add_tail(&desc->node,
                                                      &ioat_chan->free_desc);
                                } else
                                        desc->async_tx.cookie = 0;
                        } else {
                                /*
                                 * last used desc. Do not remove, so we can
                                 * append from it, but don't look at it next
                                 * time, either
                                 */
                                desc->async_tx.cookie = 0;

                                /* TODO check status bits? */
                                break;
                        }
                }
                break;
        case IOAT_VER_2_0:
                /* has some other thread already cleaned up? */
                if (ioat_chan->used_desc.prev == NULL)
                        break;

                /* work backwards to find latest finished desc */
                desc = to_ioat_desc(ioat_chan->used_desc.next);
                latest_desc = NULL;
                do {
                        desc = to_ioat_desc(desc->node.prev);
                        desc_phys = (unsigned long)desc->async_tx.phys
                                    & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
                        if (desc_phys == phys_complete) {
                                latest_desc = desc;
                                break;
                        }
                } while (&desc->node != ioat_chan->used_desc.prev);

                if (latest_desc != NULL) {

                        /* work forwards to clear finished descriptors */
                        for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
                             &desc->node != latest_desc->node.next &&
                             &desc->node != ioat_chan->used_desc.next;
                             desc = to_ioat_desc(desc->node.next)) {
                                if (desc->async_tx.cookie) {
                                        cookie = desc->async_tx.cookie;
                                        desc->async_tx.cookie = 0;

                                        pci_unmap_page(ioat_chan->device->pdev,
                                                       pci_unmap_addr(desc, dst),
                                                       pci_unmap_len(desc, len),
                                                       PCI_DMA_FROMDEVICE);
                                        pci_unmap_page(ioat_chan->device->pdev,
                                                       pci_unmap_addr(desc, src),
                                                       pci_unmap_len(desc, len),
                                                       PCI_DMA_TODEVICE);

                                        if (desc->async_tx.callback) {
                                                desc->async_tx.callback(desc->async_tx.callback_param);
                                                desc->async_tx.callback = NULL;
                                        }
                                }
                        }

                        /* move used.prev up beyond those that are finished */
                        if (&desc->node == ioat_chan->used_desc.next)
                                ioat_chan->used_desc.prev = NULL;
                        else
                                ioat_chan->used_desc.prev = &desc->node;
                }
                break;
        }

        spin_unlock_bh(&ioat_chan->desc_lock);

        ioat_chan->last_completion = phys_complete;
        if (cookie != 0)
                ioat_chan->completed_cookie = cookie;

        spin_unlock_bh(&ioat_chan->cleanup_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            dma_cookie_t *done,
                                            dma_cookie_t *used)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat_dma_memcpy_cleanup(ioat_chan);

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

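/**
 * ioat_dma_start_null_desc - issue a NULL descriptor to kick off the channel
 * @ioat_chan: the channel to be started
 *
 * Writes the physical address of the first descriptor into CHAINADDR so
 * the hardware has a valid chain to fetch from and append to.
 */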
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc;

        spin_lock_bh(&ioat_chan->desc_lock);

        desc = ioat_dma_get_next_descriptor(ioat_chan);
        desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
                                | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
                                | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        desc->hw->size = 0;
        desc->hw->src_addr = 0;
        desc->hw->dst_addr = 0;
        async_tx_ack(&desc->async_tx);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                desc->hw->next = 0;
                list_add_tail(&desc->node, &ioat_chan->used_desc);

                writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->async_tx.phys) >> 32,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

                writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
                break;
        case IOAT_VER_2_0:
                writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->async_tx.phys) >> 32,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

                ioat_chan->dmacount++;
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
        printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
               dma_async_param);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(&device->pdev->dev,
                        "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
                                 DMA_TO_DEVICE);
        dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
                                  DMA_FROM_DEVICE);
        tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
                                                   IOAT_TEST_SIZE, 0);
        if (!tx) {
                dev_err(&device->pdev->dev,
                        "Self-test prep failed, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        async_tx_ack(tx);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = (void *)0x8086;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(&device->pdev->dev,
                        "Self-test setup failed, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        device->common.device_issue_pending(dma_chan);
        msleep(1);

        if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
                                        != DMA_SUCCESS) {
                dev_err(&device->pdev->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(&device->pdev->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        device->common.device_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
                    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
                 "set ioat interrupt style: msix (default), "
                 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
        struct ioat_dma_chan *ioat_chan;
        int err, i, j, msixcnt;
        u8 intrctrl = 0;

        if (!strcmp(ioat_interrupt_style, "msix"))
                goto msix;
        if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
                goto msix_single_vector;
        if (!strcmp(ioat_interrupt_style, "msi"))
                goto msi;
        if (!strcmp(ioat_interrupt_style, "intx"))
                goto intx;
        dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
                ioat_interrupt_style);
        goto err_no_irq;

msix:
        /* The number of MSI-X vectors should equal the number of channels */
        msixcnt = device->common.chancnt;
        for (i = 0; i < msixcnt; i++)
                device->msix_entries[i].entry = i;

        err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
        if (err < 0)
                goto msi;
        if (err > 0)
                goto msix_single_vector;

        for (i = 0; i < msixcnt; i++) {
                ioat_chan = ioat_lookup_chan_by_index(device, i);
                err = request_irq(device->msix_entries[i].vector,
                                  ioat_dma_do_interrupt_msix,
                                  0, "ioat-msix", ioat_chan);
                if (err) {
                        for (j = 0; j < i; j++) {
                                ioat_chan =
                                        ioat_lookup_chan_by_index(device, j);
                                free_irq(device->msix_entries[j].vector,
                                         ioat_chan);
                        }
                        goto msix_single_vector;
                }
        }
        intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
        device->irq_mode = msix_multi_vector;
        goto done;

msix_single_vector:
        device->msix_entries[0].entry = 0;
        err = pci_enable_msix(device->pdev, device->msix_entries, 1);
        if (err)
                goto msi;

        err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
                          0, "ioat-msix", device);
        if (err) {
                pci_disable_msix(device->pdev);
                goto msi;
        }
        device->irq_mode = msix_single_vector;
        goto done;

msi:
        err = pci_enable_msi(device->pdev);
        if (err)
                goto intx;

        err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
                          0, "ioat-msi", device);
        if (err) {
                pci_disable_msi(device->pdev);
                goto intx;
        }
        /*
         * CB 1.2 devices need a bit set in configuration space to enable MSI
         */
        if (device->version == IOAT_VER_1_2) {
                u32 dmactrl;
                pci_read_config_dword(device->pdev,
                                      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
                dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
                pci_write_config_dword(device->pdev,
                                       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
        }
        device->irq_mode = msi;
        goto done;

intx:
        err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
                          IRQF_SHARED, "ioat-intx", device);
        if (err)
                goto err_no_irq;
        device->irq_mode = intx;

done:
        intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
        writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
        return 0;

err_no_irq:
        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
        dev_err(&device->pdev->dev, "no usable interrupts\n");
        device->irq_mode = none;
        return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
        struct ioat_dma_chan *ioat_chan;
        int i;

        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

        switch (device->irq_mode) {
        case msix_multi_vector:
                for (i = 0; i < device->common.chancnt; i++) {
                        ioat_chan = ioat_lookup_chan_by_index(device, i);
                        free_irq(device->msix_entries[i].vector, ioat_chan);
                }
                pci_disable_msix(device->pdev);
                break;
        case msix_single_vector:
                free_irq(device->msix_entries[0].vector, device);
                pci_disable_msix(device->pdev);
                break;
        case msi:
                free_irq(device->pdev->irq, device);
                pci_disable_msi(device->pdev);
                break;
        case intx:
                free_irq(device->pdev->irq, device);
                break;
        case none:
                dev_warn(&device->pdev->dev,
                         "call to %s without interrupts setup\n", __func__);
        }
        device->irq_mode = none;
}

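/**
 * ioat_dma_probe - set up a newly discovered I/OAT device
 * @pdev: the PCI device
 * @iobase: mapped MMIO register space
 *
 * Creates the descriptor and completion pools, enumerates the channels,
 * wires up the version-specific callbacks, sets up interrupts, runs the
 * self-test, and registers the device with the dmaengine core.
 */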
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                                      void __iomem *iobase)
{
        int err;
        struct ioatdma_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                err = -ENOMEM;
                goto err_kzalloc;
        }
        device->pdev = pdev;
        device->reg_base = iobase;
        device->version = readb(device->reg_base + IOAT_VER_OFFSET);

        /* DMA coherent memory pool for DMA descriptor allocations */
        device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                                           sizeof(struct ioat_dma_descriptor),
                                           64, 0);
        if (!device->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        device->completion_pool = pci_pool_create("completion_pool", pdev,
                                                  sizeof(u64), SMP_CACHE_BYTES,
                                                  SMP_CACHE_BYTES);
        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        INIT_LIST_HEAD(&device->common.channels);
        ioat_dma_enumerate_channels(device);

        device->common.device_alloc_chan_resources =
                                                ioat_dma_alloc_chan_resources;
        device->common.device_free_chan_resources =
                                                ioat_dma_free_chan_resources;
        device->common.dev = &pdev->dev;

        dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
        device->common.device_is_tx_complete = ioat_dma_is_complete;
        switch (device->version) {
        case IOAT_VER_1_2:
                device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
                device->common.device_issue_pending =
                                                ioat1_dma_memcpy_issue_pending;
                break;
        case IOAT_VER_2_0:
                device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
                device->common.device_issue_pending =
                                                ioat2_dma_memcpy_issue_pending;
                break;
        }

        dev_err(&device->pdev->dev,
                "Intel(R) I/OAT DMA Engine found,"
                " %d channels, device version 0x%02x, driver version %s\n",
                device->common.chancnt, device->version, IOAT_DMA_VERSION);

        err = ioat_dma_setup_interrupts(device);
        if (err)
                goto err_setup_interrupts;

        err = ioat_dma_self_test(device);
        if (err)
                goto err_self_test;

        dma_async_device_register(&device->common);

        return device;

err_self_test:
        ioat_dma_remove_interrupts(device);
err_setup_interrupts:
        pci_pool_destroy(device->completion_pool);
err_completion_pool:
        pci_pool_destroy(device->dma_pool);
err_dma_pool:
        kfree(device);
err_kzalloc:
        dev_err(&pdev->dev,
                "Intel(R) I/OAT DMA Engine initialization failed\n");
        return NULL;
}

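/**
 * ioat_dma_remove - undo everything done in ioat_dma_probe
 * @device: the device to be removed
 */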
void ioat_dma_remove(struct ioatdma_device *device)
{
        struct dma_chan *chan, *_chan;
        struct ioat_dma_chan *ioat_chan;

        ioat_dma_remove_interrupts(device);

        dma_async_device_unregister(&device->common);

        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);

        iounmap(device->reg_base);
        pci_release_regions(device->pdev);
        pci_disable_device(device->pdev);

        list_for_each_entry_safe(chan, _chan,
                                 &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);
                list_del(&chan->device_node);
                kfree(ioat_chan);
        }
        kfree(device);
}