/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define INITIAL_IOAT_DESC_COUNT 128

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioat_device(dev) container_of(dev, struct ioat_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

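/*
 * The conversion macros above recover the driver-private structures from
 * the generic dmaengine objects embedded inside them: a struct dma_chan
 * handed in by the core is always the 'common' member of a struct
 * ioat_dma_chan, so container_of() walks back to the enclosing structure
 * without any lookup table.
 */
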
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent);
static void ioat_shutdown(struct pci_dev *pdev);
static void __devexit ioat_remove(struct pci_dev *pdev);

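/*
 * ioat_dma_enumerate_channels() reads the channel count and the
 * per-descriptor transfer cap from the device; each channel's registers
 * sit in their own 0x80-byte window following the device-global
 * registers, which is where the reg_base arithmetic in the loop below
 * comes from.
 */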
static int ioat_dma_enumerate_channels(struct ioat_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
	}
	return device->common.chancnt;
}

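/*
 * A transaction whose length exceeds xfercap is fronted by one software
 * descriptor but backed by a chain of hardware descriptors, so the two
 * helpers below write the source/destination address into every hardware
 * descriptor on the transaction's tx_list, advancing by xfercap per
 * descriptor.
 */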
static void ioat_set_src(dma_addr_t addr,
			 struct dma_async_tx_descriptor *tx,
			 int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, src, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->src_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

static void ioat_set_dest(dma_addr_t addr,
			  struct dma_async_tx_descriptor *tx,
			  int index)
{
	struct ioat_desc_sw *iter, *desc = tx_to_ioat_desc(tx);
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);

	pci_unmap_addr_set(desc, dst, addr);

	list_for_each_entry(iter, &desc->async_tx.tx_list, node) {
		iter->hw->dst_addr = addr;
		addr += ioat_chan->xfercap;
	}
}

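/*
 * ioat_tx_submit() makes the prepared chain visible to the hardware: the
 * cookie assignment and the splice onto used_desc happen under desc_lock
 * so cleanup sees a consistent list, and the APPEND command is only
 * issued once at least four descriptors are pending, batching the MMIO
 * doorbell writes.
 */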
static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	int append = 0;
	dma_cookie_t cookie;
	struct ioat_desc_sw *group_start;

	group_start = list_entry(desc->async_tx.tx_list.next,
				 struct ioat_desc_sw, node);
	spin_lock_bh(&ioat_chan->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = desc->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
						group_start->async_tx.phys;
	list_splice_init(&desc->async_tx.tx_list, ioat_chan->used_desc.prev);

	ioat_chan->pending += desc->tx_cnt;
	if (ioat_chan->pending >= 4) {
		append = 1;
		ioat_chan->pending = 0;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (append)
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	return cookie;
}

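/*
 * Descriptors come in pairs: the hardware descriptor proper, allocated
 * from the device's DMA-coherent pci_pool, and a kzalloc'd software
 * wrapper carrying the list node, the async_tx state, and the unmap
 * bookkeeping.
 */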
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioat_device *ioat_device;
	dma_addr_t phys;

	ioat_device = to_ioat_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioat_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioat_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	desc_sw->async_tx.tx_set_src = ioat_set_src;
	desc_sw->async_tx.tx_set_dest = ioat_set_dest;
	desc_sw->async_tx.tx_submit = ioat_tx_submit;
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}

/* returns the actual number of allocated descriptors */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc = NULL;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return INITIAL_IOAT_DESC_COUNT;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"ioatdma: Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	list_splice(&tmp_list, &ioat_chan->free_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_dma_start_null_desc(ioat_chan);
	return i;
}

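/*
 * Teardown mirrors channel bring-up: reap completions, reset the
 * channel, then return every descriptor on the used and free lists to
 * its pool.  Exactly one in-use descriptor is expected to remain, the
 * null descriptor deliberately left on the chain.
 */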
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_device *ioat_device = to_ioat_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	ioat_dma_memcpy_cleanup(ioat_chan);

	writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioat_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioat_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
}

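/*
 * ioat_dma_prep_memcpy() chops the requested length into xfercap-sized
 * pieces, one hardware descriptor each.  Only the final descriptor gets
 * the completion-status-write control bit, the placeholder cookie, and a
 * cleared ack, since the client owns the lifetime of the transaction as
 * a whole.
 */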
static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
						struct dma_chan *chan,
						size_t len,
						int int_en)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *first, *prev, *new;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t orig_len;
	int desc_count = 0;

	if (!len)
		return NULL;

	orig_len = len;

	first = NULL;
	prev = NULL;

	spin_lock_bh(&ioat_chan->desc_lock);
	while (len) {
		if (!list_empty(&ioat_chan->free_desc)) {
			new = to_ioat_desc(ioat_chan->free_desc.next);
			list_del(&new->node);
		} else {
			/* try to get another desc */
			new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			/* will this ever happen? */
			/* TODO add upper limit on these */
			BUG_ON(!new);
		}

		copy = min((u32) len, ioat_chan->xfercap);

		new->hw->size = copy;
		new->hw->ctl = 0;
		new->async_tx.cookie = 0;
		new->async_tx.ack = 1;

		/* chain together the physical address list for the HW */
		if (!first)
			first = new;
		else
			prev->hw->next = (u64) new->async_tx.phys;

		prev = new;
		len -= copy;
		list_add_tail(&new->node, &new_chain);
		desc_count++;
	}

	list_splice(&new_chain, &new->async_tx.tx_list);

	new->hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	new->hw->next = 0;
	new->tx_cnt = desc_count;
	new->async_tx.ack = 0; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	pci_unmap_len_set(new, len, orig_len);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return new ? &new->async_tx : NULL;
}

/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
 * @chan: DMA channel handle
 */
static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending != 0) {
		ioat_chan->pending = 0;
		writeb(IOAT_CHANCMD_APPEND,
		       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
	}
}

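/*
 * The cleanup path is driven by the completion writeback area: the
 * hardware writes the physical address of the last descriptor it
 * finished, and everything on used_desc up to that address can be
 * unmapped and recycled.  cleanup_lock is only trylock'd so concurrent
 * callers simply skip the work rather than contend.
 */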
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock(&ioat_chan->cleanup_lock))
		return;

	/*
	 * The completion writeback can happen at any time,
	 * so reads by the driver need to be atomic operations.
	 * The descriptor physical addresses are limited to 32-bits
	 * when the CPU can only do a 32-bit mov.
	 */
#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full &
		IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete = ioat_chan->completion_virt->low &
		IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full &
	     IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
	    IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"ioatdma: Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock(&ioat_chan->cleanup_lock);
		return;
	}

	spin_lock_bh(&ioat_chan->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {

		/*
		 * Incoming DMA requests may use multiple descriptors, due to
		 * exceeding xfercap, perhaps. If so, only the last one will
		 * have a cookie, and require unmapping.
		 */
		if (desc->async_tx.cookie) {
			cookie = desc->async_tx.cookie;

			/*
			 * yes we are unmapping both _page and _single alloc'd
			 * regions with unmap_page. Is this *really* that bad?
			 */
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, dst),
				       pci_unmap_len(desc, len),
				       PCI_DMA_FROMDEVICE);
			pci_unmap_page(ioat_chan->device->pdev,
				       pci_unmap_addr(desc, src),
				       pci_unmap_len(desc, len),
				       PCI_DMA_TODEVICE);
		}

		if (desc->async_tx.phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so cleanup
			 * if the client is done with the descriptor
			 */
			if (desc->async_tx.ack) {
				list_del(&desc->node);
				list_add_tail(&desc->node,
					      &ioat_chan->free_desc);
			} else
				desc->async_tx.cookie = 0;
		} else {
			/*
			 * last used desc. Do not remove, so we can append from
			 * it, but don't look at it next time, either
			 */
			desc->async_tx.cookie = 0;

			/* TODO check status bits? */
			break;
		}
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock(&ioat_chan->cleanup_lock);
}

static void ioat_dma_dependency_added(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	spin_lock_bh(&ioat_chan->desc_lock);
	if (ioat_chan->pending == 0) {
		spin_unlock_bh(&ioat_chan->desc_lock);
		ioat_dma_memcpy_cleanup(ioat_chan);
	} else
		spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}

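/*
 * A minimal polling loop over this interface, as the self-test below
 * does (a sketch only; a real client would bound the wait):
 *
 *	cookie = tx->tx_submit(tx);
 *	ioat_dma_memcpy_issue_pending(chan);
 *	while (ioat_dma_is_complete(chan, cookie, NULL, NULL)
 *			!= DMA_SUCCESS)
 *		msleep(1);
 */
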
/* PCI API */

static struct pci_device_id ioat_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
	{ 0, }
};

static struct pci_driver ioat_pci_driver = {
	.name = "ioatdma",
	.id_table = ioat_pci_tbl,
	.probe = ioat_probe,
	.shutdown = ioat_shutdown,
	.remove = __devexit_p(ioat_remove),
};

static irqreturn_t ioat_do_interrupt(int irq, void *data)
{
	struct ioat_device *instance = data;
	unsigned long attnstatus;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);

	printk(KERN_ERR "ioatdma: interrupt! status %lx\n", attnstatus);

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

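/*
 * ioat_dma_start_null_desc - prime an idle channel
 *
 * A NULL descriptor moves no data; it gives the channel's chain-address
 * register something valid to point at before real work is appended.
 * The 64-bit chain address is written as two 32-bit halves, then the
 * channel is given the START command.
 */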
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	if (!list_empty(&ioat_chan->free_desc)) {
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&desc->node);
	} else {
		/* try to get another desc */
		spin_unlock_bh(&ioat_chan->desc_lock);
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		spin_lock_bh(&ioat_chan->desc_lock);
		/* will this ever happen? */
		BUG_ON(!desc);
	}

	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
	desc->hw->next = 0;
	desc->async_tx.ack = 1;

	list_add_tail(&desc->node, &ioat_chan->used_desc);
	spin_unlock_bh(&ioat_chan->desc_lock);

	writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
	writel(((u64) desc->async_tx.phys) >> 32,
	       ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);

	writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

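/*
 * The self-test exercises the whole client-visible path once: allocate
 * channel resources, prep a small memcpy, map both buffers, set the
 * source and destination on the descriptor, submit, kick the channel,
 * then poll the cookie and memcmp() the result.
 */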
static int ioat_self_test(struct ioat_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
	async_tx_ack(tx);
	addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
			      DMA_TO_DEVICE);
	ioat_set_src(addr, tx, 0);
	addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
			      DMA_FROM_DEVICE);
	ioat_set_dest(addr, tx, 0);
	cookie = ioat_tx_submit(tx);
	ioat_dma_memcpy_issue_pending(dma_chan);
	msleep(1);

	if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"ioatdma: Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	ioat_dma_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

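/*
 * Probe ordering: the MMIO BAR is mapped and both pci_pools exist before
 * the interrupt handler is registered, and the dmaengine device is only
 * registered with the core after the self-test passes, so clients never
 * see an engine that cannot copy.
 */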
static int __devinit ioat_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	int err;
	unsigned long mmio_start, mmio_len;
	void __iomem *reg_base;
	struct ioat_device *device;

	err = pci_enable_device(pdev);
	if (err)
		goto err_enable_device;

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		goto err_set_dma_mask;

	err = pci_request_regions(pdev, ioat_pci_driver.name);
	if (err)
		goto err_request_regions;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	reg_base = ioremap(mmio_start, mmio_len);
	if (!reg_base) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->pdev = pdev;
	pci_set_drvdata(pdev, device);
#ifdef CONFIG_PCI_MSI
	if (pci_enable_msi(pdev) == 0) {
		device->msi = 1;
	} else {
		device->msi = 0;
	}
#endif
	err = request_irq(pdev->irq, &ioat_do_interrupt, IRQF_SHARED, "ioat",
			  device);
	if (err)
		goto err_irq;

	device->reg_base = reg_base;

	writeb(IOAT_INTRCTRL_MASTER_INT_EN,
	       device->reg_base + IOAT_INTRCTRL_OFFSET);
	pci_set_master(pdev);

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
	device->common.device_dependency_added = ioat_dma_dependency_added;
	device->common.dev = &pdev->dev;
	printk(KERN_INFO
	       "ioatdma: Intel(R) I/OAT DMA Engine found, %d channels\n",
	       device->common.chancnt);

	err = ioat_self_test(device);
	if (err)
		goto err_self_test;

	dma_async_device_register(&device->common);

	return 0;

err_self_test:
err_irq:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	iounmap(reg_base);
err_ioremap:
	pci_release_regions(pdev);
err_request_regions:
err_set_dma_mask:
	pci_disable_device(pdev);
err_enable_device:

	printk(KERN_INFO
	       "ioatdma: Intel(R) I/OAT DMA Engine initialization failed\n");

	return err;
}

static void ioat_shutdown(struct pci_dev *pdev)
{
	struct ioat_device *device;

	device = pci_get_drvdata(pdev);

	dma_async_device_unregister(&device->common);
}

static void __devexit ioat_remove(struct pci_dev *pdev)
{
	struct ioat_device *device;
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	device = pci_get_drvdata(pdev);
	dma_async_device_unregister(&device->common);

	free_irq(device->pdev->irq, device);
#ifdef CONFIG_PCI_MSI
	if (device->msi)
		pci_disable_msi(device->pdev);
#endif
	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);
	iounmap(device->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);
		kfree(ioat_chan);
	}
	kfree(device);
}

/* MODULE API */
MODULE_VERSION("1.9");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");

static int __init ioat_init_module(void)
{
	/* it's currently unsafe to unload this module */
	/* if forced, worst case is that rmmod hangs */
	__unsafe(THIS_MODULE);

	return pci_register_driver(&ioat_pci_driver);
}

module_init(ioat_init_module);

static void __exit ioat_exit_module(void)
{
	pci_unregister_driver(&ioat_pci_driver);
}

module_exit(ioat_exit_module);