/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"

#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)

static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
                 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);

static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
                                                struct ioatdma_device *device,
                                                int index)
{
        return device->idx[index];
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioat_dma_chan *ioat_chan;
        unsigned long attnstatus;
        int bit;
        u8 intrctrl;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
                return IRQ_NONE;

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
                return IRQ_NONE;
        }

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
                ioat_chan = ioat_lookup_chan_by_index(instance, bit);
                tasklet_schedule(&ioat_chan->cleanup_task);
        }

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
        return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioat_dma_chan *ioat_chan = data;

        tasklet_schedule(&ioat_chan->cleanup_task);

        return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);

/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
        u8 xfercap_scale;
        u32 xfercap;
        int i;
        struct ioat_dma_chan *ioat_chan;

        device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

        for (i = 0; i < device->common.chancnt; i++) {
                ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
                if (!ioat_chan) {
                        device->common.chancnt = i;
                        break;
                }

                ioat_chan->device = device;
                ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
                ioat_chan->xfercap = xfercap;
                ioat_chan->desccount = 0;
                if (ioat_chan->device->version != IOAT_VER_1_2) {
                        writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
                                        | IOAT_DMA_DCA_ANY_CPU,
                                ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
                }
                spin_lock_init(&ioat_chan->cleanup_lock);
                spin_lock_init(&ioat_chan->desc_lock);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                /* This should be made common somewhere in dmaengine.c */
                ioat_chan->common.device = &device->common;
                list_add_tail(&ioat_chan->common.device_node,
                              &device->common.channels);
                device->idx[i] = ioat_chan;
                tasklet_init(&ioat_chan->cleanup_task,
                             ioat_dma_cleanup_tasklet,
                             (unsigned long) ioat_chan);
                tasklet_disable(&ioat_chan->cleanup_task);
        }
        return device->common.chancnt;
}

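/*
 * ioat_set_src / ioat_set_dest - stash the client-supplied DMA addresses
 * in the software descriptor; the hardware descriptors are not filled in
 * until tx_submit.  @index is unused here since a memcpy has exactly one
 * source and one destination.
 */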
static void ioat_set_src(dma_addr_t addr,
                         struct dma_async_tx_descriptor *tx,
                         int index)
{
        tx_to_ioat_desc(tx)->src = addr;
}

static void ioat_set_dest(dma_addr_t addr,
                          struct dma_async_tx_descriptor *tx,
                          int index)
{
        tx_to_ioat_desc(tx)->dst = addr;
}

static inline void __ioat1_dma_memcpy_issue_pending(
                                        struct ioat_dma_chan *ioat_chan);
static inline void __ioat2_dma_memcpy_issue_pending(
                                        struct ioat_dma_chan *ioat_chan);

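/**
 * ioat1_tx_submit - fill out and append a prepared transaction (IOAT v1)
 * @tx: descriptor prepared by ioat1_dma_prep_memcpy
 *
 * Splits the copy into hardware descriptors of at most xfercap bytes,
 * links them onto the channel's chain under desc_lock, and assigns the
 * cookie to the last descriptor so that cleanup completes the whole
 * transaction at once.
 */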
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
        struct ioat_desc_sw *prev, *new;
        struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
        LIST_HEAD(new_chain);
        u32 copy;
        size_t len;
        dma_addr_t src, dst;
        int orig_ack;
        unsigned int desc_count = 0;

        /* src and dest and len are stored in the initial descriptor */
        len = first->len;
        src = first->src;
        dst = first->dst;
        orig_ack = first->async_tx.ack;
        new = first;

        spin_lock_bh(&ioat_chan->desc_lock);
        prev = to_ioat_desc(ioat_chan->used_desc.prev);
        prefetch(prev->hw);
        do {
                copy = min((u32) len, ioat_chan->xfercap);

                new->async_tx.ack = 1;

                hw = new->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;
                hw->next = 0;

                /* chain together the physical address list for the HW */
                wmb();
                prev->hw->next = (u64) new->async_tx.phys;

                len -= copy;
                dst += copy;
                src += copy;

                list_add_tail(&new->node, &new_chain);
                desc_count++;
                prev = new;
        } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

        hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        if (new->async_tx.callback) {
                hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
                if (first != new) {
                        /* move callback into the last desc */
                        new->async_tx.callback = first->async_tx.callback;
                        new->async_tx.callback_param
                                        = first->async_tx.callback_param;
                        first->async_tx.callback = NULL;
                        first->async_tx.callback_param = NULL;
                }
        }

        new->tx_cnt = desc_count;
        new->async_tx.ack = orig_ack; /* client is in control of this ack */

        /* store the original values for use in later cleanup */
        if (new != first) {
                new->src = first->src;
                new->dst = first->dst;
                new->len = first->len;
        }

        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->async_tx.cookie = cookie;

        /* write address into NextDescriptor field of last desc in chain */
        to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
                                                        first->async_tx.phys;
        __list_splice(&new_chain, ioat_chan->used_desc.prev);

        ioat_chan->dmacount += desc_count;
        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= ioat_pending_level)
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return cookie;
}

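/**
 * ioat2_tx_submit - fill out and append a prepared transaction (IOAT v2)
 * @tx: descriptor prepared by ioat2_dma_prep_memcpy
 *
 * Works on the pre-linked descriptor ring; note that desc_lock was taken
 * in ioat2_dma_prep_memcpy and is released here.
 */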
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
        struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
        struct ioat_desc_sw *new;
        struct ioat_dma_descriptor *hw;
        dma_cookie_t cookie;
        u32 copy;
        size_t len;
        dma_addr_t src, dst;
        int orig_ack;
        unsigned int desc_count = 0;

        /* src and dest and len are stored in the initial descriptor */
        len = first->len;
        src = first->src;
        dst = first->dst;
        orig_ack = first->async_tx.ack;
        new = first;

        /* ioat_chan->desc_lock is still in force in version 2 path */

        do {
                copy = min((u32) len, ioat_chan->xfercap);

                new->async_tx.ack = 1;

                hw = new->hw;
                hw->size = copy;
                hw->ctl = 0;
                hw->src_addr = src;
                hw->dst_addr = dst;

                len -= copy;
                dst += copy;
                src += copy;
                desc_count++;
        } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

        hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        if (new->async_tx.callback) {
                hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
                if (first != new) {
                        /* move callback into the last desc */
                        new->async_tx.callback = first->async_tx.callback;
                        new->async_tx.callback_param
                                        = first->async_tx.callback_param;
                        first->async_tx.callback = NULL;
                        first->async_tx.callback_param = NULL;
                }
        }

        new->tx_cnt = desc_count;
        new->async_tx.ack = orig_ack; /* client is in control of this ack */

        /* store the original values for use in later cleanup */
        if (new != first) {
                new->src = first->src;
                new->dst = first->dst;
                new->len = first->len;
        }

        /* cookie incr and addition to used_list must be atomic */
        cookie = ioat_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        ioat_chan->common.cookie = new->async_tx.cookie = cookie;

        ioat_chan->dmacount += desc_count;
        ioat_chan->pending += desc_count;
        if (ioat_chan->pending >= ioat_pending_level)
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        return cookie;
}

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
                                        struct ioat_dma_chan *ioat_chan,
                                        gfp_t flags)
{
        struct ioat_dma_descriptor *desc;
        struct ioat_desc_sw *desc_sw;
        struct ioatdma_device *ioatdma_device;
        dma_addr_t phys;

        ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
        desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
        if (unlikely(!desc))
                return NULL;

        desc_sw = kzalloc(sizeof(*desc_sw), flags);
        if (unlikely(!desc_sw)) {
                pci_pool_free(ioatdma_device->dma_pool, desc, phys);
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
        desc_sw->async_tx.tx_set_src = ioat_set_src;
        desc_sw->async_tx.tx_set_dest = ioat_set_dest;
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                desc_sw->async_tx.tx_submit = ioat1_tx_submit;
                break;
        case IOAT_VER_2_0:
                desc_sw->async_tx.tx_submit = ioat2_tx_submit;
                break;
        }
        INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

        desc_sw->hw = desc;
        desc_sw->async_tx.phys = phys;

        return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
                 "initial descriptors per channel (default: 256)");

/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc, *_desc;

        /* setup used_desc */
        ioat_chan->used_desc.next = ioat_chan->free_desc.next;
        ioat_chan->used_desc.prev = NULL;

        /* pull free_desc out of the circle so that every node is a hw
         * descriptor, but leave it pointing to the list
         */
        ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
        ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

        /* circle link the hw descriptors */
        desc = to_ioat_desc(ioat_chan->free_desc.next);
        desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
        list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
                desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
        }
}

/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc = NULL;
        u16 chanctrl;
        u32 chanerr;
        int i;
        LIST_HEAD(tmp_list);

        /* have we already been set up? */
        if (!list_empty(&ioat_chan->free_desc))
                return ioat_chan->desccount;

        /* Setup register to interrupt and write completion status on error */
        chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
                IOAT_CHANCTRL_ERR_COMPLETION_EN;
        writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        if (chanerr) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        }

        /* Allocate descriptors */
        for (i = 0; i < ioat_initial_desc_count; i++) {
                desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
                if (!desc) {
                        dev_err(&ioat_chan->device->pdev->dev,
                                "Only %d initial descriptors\n", i);
                        break;
                }
                list_add_tail(&desc->node, &tmp_list);
        }
        spin_lock_bh(&ioat_chan->desc_lock);
        ioat_chan->desccount = i;
        list_splice(&tmp_list, &ioat_chan->free_desc);
        if (ioat_chan->device->version != IOAT_VER_1_2)
                ioat2_dma_massage_chan_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->desc_lock);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion_virt =
                pci_pool_alloc(ioat_chan->device->completion_pool,
                               GFP_KERNEL,
                               &ioat_chan->completion_addr);
        memset(ioat_chan->completion_virt, 0,
               sizeof(*ioat_chan->completion_virt));
        writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) ioat_chan->completion_addr) >> 32,
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        tasklet_enable(&ioat_chan->cleanup_task);
        ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
        return ioat_chan->desccount;
}

/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
        struct ioat_desc_sw *desc, *_desc;
        int in_use_descs = 0;

        tasklet_disable(&ioat_chan->cleanup_task);
        ioat_dma_memcpy_cleanup(ioat_chan);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
        mdelay(100);

        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {
                        in_use_descs++;
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->free_desc, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                break;
        case IOAT_VER_2_0:
                list_for_each_entry_safe(desc, _desc,
                                         ioat_chan->free_desc.next, node) {
                        list_del(&desc->node);
                        pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                                      desc->async_tx.phys);
                        kfree(desc);
                }
                desc = to_ioat_desc(ioat_chan->free_desc.next);
                pci_pool_free(ioatdma_device->dma_pool, desc->hw,
                              desc->async_tx.phys);
                kfree(desc);
                INIT_LIST_HEAD(&ioat_chan->free_desc);
                INIT_LIST_HEAD(&ioat_chan->used_desc);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);

        pci_pool_free(ioatdma_device->completion_pool,
                      ioat_chan->completion_virt,
                      ioat_chan->completion_addr);

        /* one is ok since we left it on there on purpose */
        if (in_use_descs > 1)
                dev_err(&ioat_chan->device->pdev->dev,
                        "Freeing %d in use descriptors!\n",
                        in_use_descs - 1);

        ioat_chan->last_completion = ioat_chan->completion_addr = 0;
        ioat_chan->pending = 0;
        ioat_chan->dmacount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held. Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new = NULL;

        if (!list_empty(&ioat_chan->free_desc)) {
                new = to_ioat_desc(ioat_chan->free_desc.next);
                list_del(&new->node);
        } else {
                /* try to get another desc */
                new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                /* will this ever happen? */
                /* TODO add upper limit on these */
                BUG_ON(!new);
        }

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *new = NULL;

        /*
         * used.prev points to where to start processing
         * used.next points to next free descriptor
         * if used.prev == NULL, there are none waiting to be processed
         * if used.next == used.prev.prev, there is only one free descriptor,
         *      and we need to use it as a noop descriptor before
         *      linking in a new set of descriptors, since the device
         *      has probably already read the pointer to it
         */
        if (ioat_chan->used_desc.prev &&
            ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

                struct ioat_desc_sw *desc = NULL;
                struct ioat_desc_sw *noop_desc = NULL;
                int i;

                /* set up the noop descriptor */
                noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
                noop_desc->hw->size = 0;
                noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
                noop_desc->hw->src_addr = 0;
                noop_desc->hw->dst_addr = 0;

                ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
                ioat_chan->pending++;
                ioat_chan->dmacount++;

                /* get a few more descriptors */
                for (i = 16; i; i--) {
                        desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
                        BUG_ON(!desc);
                        list_add_tail(&desc->node, ioat_chan->used_desc.next);

                        desc->hw->next
                                = to_ioat_desc(desc->node.next)->async_tx.phys;
                        to_ioat_desc(desc->node.prev)->hw->next
                                = desc->async_tx.phys;
                        ioat_chan->desccount++;
                }

                ioat_chan->used_desc.next = noop_desc->node.next;
        }
        new = to_ioat_desc(ioat_chan->used_desc.next);
        prefetch(new);
        ioat_chan->used_desc.next = new->node.next;

        if (ioat_chan->used_desc.prev == NULL)
                ioat_chan->used_desc.prev = &new->node;

        prefetch(new->hw);
        return new;
}

static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
                                        struct ioat_dma_chan *ioat_chan)
{
        if (!ioat_chan)
                return NULL;

        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                return ioat1_dma_get_next_descriptor(ioat_chan);
        case IOAT_VER_2_0:
                return ioat2_dma_get_next_descriptor(ioat_chan);
        }
        return NULL;
}

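/*
 * ioat1_dma_prep_memcpy - reserve the first descriptor for a @len byte copy.
 * The source/destination addresses arrive later via tx_set_src/tx_set_dest,
 * and the remaining descriptors are claimed at tx_submit time.
 */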
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                size_t len,
                                                int int_en)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat_dma_get_next_descriptor(ioat_chan);
        new->len = len;
        spin_unlock_bh(&ioat_chan->desc_lock);

        return new ? &new->async_tx : NULL;
}

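/*
 * ioat2_dma_prep_memcpy - version 2 variant of the above; deliberately
 * returns with desc_lock held, to be released in ioat2_tx_submit.
 */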
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
                                                struct dma_chan *chan,
                                                size_t len,
                                                int int_en)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *new;

        spin_lock_bh(&ioat_chan->desc_lock);
        new = ioat2_dma_get_next_descriptor(ioat_chan);
        new->len = len;

        /* leave ioat_chan->desc_lock set in version 2 path */
        return new ? &new->async_tx : NULL;
}

/**
 * ioat{1,2}_dma_memcpy_issue_pending - push potentially unrecognized
 *      appended descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
                                        struct ioat_dma_chan *ioat_chan)
{
        ioat_chan->pending = 0;
        writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat1_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
}

static inline void __ioat2_dma_memcpy_issue_pending(
                                        struct ioat_dma_chan *ioat_chan)
{
        ioat_chan->pending = 0;
        writew(ioat_chan->dmacount,
               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

        if (ioat_chan->pending != 0) {
                spin_lock_bh(&ioat_chan->desc_lock);
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->desc_lock);
        }
}

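/*
 * ioat_dma_cleanup_tasklet - bottom half for channel interrupts: reap
 * finished descriptors, then write IOAT_CHANCTRL_INT_DISABLE, which
 * (despite the name) re-arms the channel's interrupt for the next
 * completion.
 */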
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
        struct ioat_dma_chan *chan = (void *)data;
        ioat_dma_memcpy_cleanup(chan);
        writew(IOAT_CHANCTRL_INT_DISABLE,
               chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @ioat_chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
        unsigned long phys_complete;
        struct ioat_desc_sw *desc, *_desc;
        dma_cookie_t cookie = 0;
        unsigned long desc_phys;
        struct ioat_desc_sw *latest_desc;

        prefetch(ioat_chan->completion_virt);

        if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
                return;

        /* The completion writeback can happen at any time,
           so reads by the driver need to be atomic operations.
           The descriptor physical addresses are limited to 32-bits
           when the CPU can only do a 32-bit mov. */

#if (BITS_PER_LONG == 64)
        phys_complete =
                ioat_chan->completion_virt->full
                & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
        phys_complete =
                ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

        if ((ioat_chan->completion_virt->full
                & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
                        IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
                dev_err(&ioat_chan->device->pdev->dev,
                        "Channel halted, chanerr = %x\n",
                        readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

                /* TODO do something to salvage the situation */
        }

        if (phys_complete == ioat_chan->last_completion) {
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                return;
        }

        cookie = 0;
        spin_lock_bh(&ioat_chan->desc_lock);
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                list_for_each_entry_safe(desc, _desc,
                                         &ioat_chan->used_desc, node) {

                        /*
                         * Incoming DMA requests may use multiple descriptors,
                         * due to exceeding xfercap. If so, only the last one
                         * will have a cookie, and require unmapping.
                         */
                        if (desc->async_tx.cookie) {
                                cookie = desc->async_tx.cookie;

                                /*
                                 * yes we are unmapping both _page and _single
                                 * alloc'd regions with unmap_page. Is this
                                 * *really* that bad?
                                 */
                                pci_unmap_page(ioat_chan->device->pdev,
                                                pci_unmap_addr(desc, dst),
                                                pci_unmap_len(desc, len),
                                                PCI_DMA_FROMDEVICE);
                                pci_unmap_page(ioat_chan->device->pdev,
                                                pci_unmap_addr(desc, src),
                                                pci_unmap_len(desc, len),
                                                PCI_DMA_TODEVICE);

                                if (desc->async_tx.callback) {
                                        desc->async_tx.callback(desc->async_tx.callback_param);
                                        desc->async_tx.callback = NULL;
                                }
                        }

                        if (desc->async_tx.phys != phys_complete) {
                                /*
                                 * a completed entry, but not the last, so clean
                                 * up if the client is done with the descriptor
                                 */
                                if (desc->async_tx.ack) {
                                        list_del(&desc->node);
                                        list_add_tail(&desc->node,
                                                      &ioat_chan->free_desc);
                                } else
                                        desc->async_tx.cookie = 0;
                        } else {
                                /*
                                 * last used desc. Do not remove, so we can
                                 * append from it, but don't look at it next
                                 * time, either
                                 */
                                desc->async_tx.cookie = 0;

                                /* TODO check status bits? */
                                break;
                        }
                }
                break;
        case IOAT_VER_2_0:
                /* has some other thread already cleaned up? */
                if (ioat_chan->used_desc.prev == NULL)
                        break;

                /* work backwards to find latest finished desc */
                desc = to_ioat_desc(ioat_chan->used_desc.next);
                latest_desc = NULL;
                do {
                        desc = to_ioat_desc(desc->node.prev);
                        desc_phys = (unsigned long)desc->async_tx.phys
                                       & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
                        if (desc_phys == phys_complete) {
                                latest_desc = desc;
                                break;
                        }
                } while (&desc->node != ioat_chan->used_desc.prev);

                if (latest_desc != NULL) {

                        /* work forwards to clear finished descriptors */
                        for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
                             &desc->node != latest_desc->node.next &&
                             &desc->node != ioat_chan->used_desc.next;
                             desc = to_ioat_desc(desc->node.next)) {
                                if (desc->async_tx.cookie) {
                                        cookie = desc->async_tx.cookie;
                                        desc->async_tx.cookie = 0;

                                        pci_unmap_page(ioat_chan->device->pdev,
                                                      pci_unmap_addr(desc, dst),
                                                      pci_unmap_len(desc, len),
                                                      PCI_DMA_FROMDEVICE);
                                        pci_unmap_page(ioat_chan->device->pdev,
                                                      pci_unmap_addr(desc, src),
                                                      pci_unmap_len(desc, len),
                                                      PCI_DMA_TODEVICE);

                                        if (desc->async_tx.callback) {
                                                desc->async_tx.callback(desc->async_tx.callback_param);
                                                desc->async_tx.callback = NULL;
                                        }
                                }
                        }

                        /* move used.prev up beyond those that are finished */
                        if (&desc->node == ioat_chan->used_desc.next)
                                ioat_chan->used_desc.prev = NULL;
                        else
                                ioat_chan->used_desc.prev = &desc->node;
                }
                break;
        }

        spin_unlock_bh(&ioat_chan->desc_lock);

        ioat_chan->last_completion = phys_complete;
        if (cookie != 0)
                ioat_chan->completed_cookie = cookie;

        spin_unlock_bh(&ioat_chan->cleanup_lock);
}

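/*
 * ioat_dma_dependency_added - dmaengine callback invoked when a dependent
 * operation becomes submittable; used here as an opportunity to reap
 * completed descriptors when nothing is pending.
 */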
static void ioat_dma_dependency_added(struct dma_chan *chan)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        spin_lock_bh(&ioat_chan->desc_lock);
        if (ioat_chan->pending == 0) {
                spin_unlock_bh(&ioat_chan->desc_lock);
                ioat_dma_memcpy_cleanup(ioat_chan);
        } else
                spin_unlock_bh(&ioat_chan->desc_lock);
}

/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
                                            dma_cookie_t cookie,
                                            dma_cookie_t *done,
                                            dma_cookie_t *used)
{
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat_dma_memcpy_cleanup(ioat_chan);

        last_used = chan->cookie;
        last_complete = ioat_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

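/**
 * ioat_dma_start_null_desc - get the channel running with a NULL
 *      (no-op) descriptor
 * @ioat_chan: the channel to be started
 *
 * Writes the descriptor's address into the chain-address registers, then
 * kicks the channel: CHANCMD_START on version 1.2, a dmacount write on
 * version 2.0.
 */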
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
        struct ioat_desc_sw *desc;

        spin_lock_bh(&ioat_chan->desc_lock);

        desc = ioat_dma_get_next_descriptor(ioat_chan);
        desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
                                | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
                                | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
        desc->hw->size = 0;
        desc->hw->src_addr = 0;
        desc->hw->dst_addr = 0;
        desc->async_tx.ack = 1;
        switch (ioat_chan->device->version) {
        case IOAT_VER_1_2:
                desc->hw->next = 0;
                list_add_tail(&desc->node, &ioat_chan->used_desc);

                writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->async_tx.phys) >> 32,
                       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

                writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
                        + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
                break;
        case IOAT_VER_2_0:
                writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
                writel(((u64) desc->async_tx.phys) >> 32,
                       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

                ioat_chan->dmacount++;
                __ioat2_dma_memcpy_issue_pending(ioat_chan);
                break;
        }
        spin_unlock_bh(&ioat_chan->desc_lock);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
        printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
                dma_async_param);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
        int i;
        u8 *src;
        u8 *dest;
        struct dma_chan *dma_chan;
        struct dma_async_tx_descriptor *tx = NULL;
        dma_addr_t addr;
        dma_cookie_t cookie;
        int err = 0;

        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOAT_TEST_SIZE; i++)
                src[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(&device->pdev->dev,
                        "selftest cannot allocate chan resource\n");
                err = -ENODEV;
                goto out;
        }

        tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
        if (!tx) {
                dev_err(&device->pdev->dev,
                        "Self-test prep failed, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        async_tx_ack(tx);
        addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
                              DMA_TO_DEVICE);
        tx->tx_set_src(addr, tx, 0);
        addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
                              DMA_FROM_DEVICE);
        tx->tx_set_dest(addr, tx, 0);
        tx->callback = ioat_dma_test_callback;
        tx->callback_param = (void *)0x8086;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(&device->pdev->dev,
                        "Self-test setup failed, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        device->common.device_issue_pending(dma_chan);
        msleep(1);

        if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
                                        != DMA_SUCCESS) {
                dev_err(&device->pdev->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }
        if (memcmp(src, dest, IOAT_TEST_SIZE)) {
                dev_err(&device->pdev->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        device->common.device_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
                    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
                 "set ioat interrupt style: msix (default), "
                 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
        struct ioat_dma_chan *ioat_chan;
        int err, i, j, msixcnt;
        u8 intrctrl = 0;

        if (!strcmp(ioat_interrupt_style, "msix"))
                goto msix;
        if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
                goto msix_single_vector;
        if (!strcmp(ioat_interrupt_style, "msi"))
                goto msi;
        if (!strcmp(ioat_interrupt_style, "intx"))
                goto intx;
        dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
                ioat_interrupt_style);
        goto err_no_irq;

msix:
        /* The number of MSI-X vectors should equal the number of channels */
        msixcnt = device->common.chancnt;
        for (i = 0; i < msixcnt; i++)
                device->msix_entries[i].entry = i;

        err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
        if (err < 0)
                goto msi;
        if (err > 0)
                goto msix_single_vector;

        for (i = 0; i < msixcnt; i++) {
                ioat_chan = ioat_lookup_chan_by_index(device, i);
                err = request_irq(device->msix_entries[i].vector,
                                  ioat_dma_do_interrupt_msix,
                                  0, "ioat-msix", ioat_chan);
                if (err) {
                        for (j = 0; j < i; j++) {
                                ioat_chan =
                                        ioat_lookup_chan_by_index(device, j);
                                free_irq(device->msix_entries[j].vector,
                                         ioat_chan);
                        }
                        goto msix_single_vector;
                }
        }
        intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
        device->irq_mode = msix_multi_vector;
        goto done;

msix_single_vector:
        device->msix_entries[0].entry = 0;
        err = pci_enable_msix(device->pdev, device->msix_entries, 1);
        if (err)
                goto msi;

        err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
                          0, "ioat-msix", device);
        if (err) {
                pci_disable_msix(device->pdev);
                goto msi;
        }
        device->irq_mode = msix_single_vector;
        goto done;

msi:
        err = pci_enable_msi(device->pdev);
        if (err)
                goto intx;

        err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
                          0, "ioat-msi", device);
        if (err) {
                pci_disable_msi(device->pdev);
                goto intx;
        }
        /*
         * CB 1.2 devices need a bit set in configuration space to enable MSI
         */
        if (device->version == IOAT_VER_1_2) {
                u32 dmactrl;
                pci_read_config_dword(device->pdev,
                                      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
                dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
                pci_write_config_dword(device->pdev,
                                       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
        }
        device->irq_mode = msi;
        goto done;

intx:
        err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
                          IRQF_SHARED, "ioat-intx", device);
        if (err)
                goto err_no_irq;
        device->irq_mode = intx;

done:
        intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
        writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
        return 0;

err_no_irq:
        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
        dev_err(&device->pdev->dev, "no usable interrupts\n");
        device->irq_mode = none;
        return -1;
}

/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
        struct ioat_dma_chan *ioat_chan;
        int i;

        /* Disable all interrupt generation */
        writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

        switch (device->irq_mode) {
        case msix_multi_vector:
                for (i = 0; i < device->common.chancnt; i++) {
                        ioat_chan = ioat_lookup_chan_by_index(device, i);
                        free_irq(device->msix_entries[i].vector, ioat_chan);
                }
                pci_disable_msix(device->pdev);
                break;
        case msix_single_vector:
                free_irq(device->msix_entries[0].vector, device);
                pci_disable_msix(device->pdev);
                break;
        case msi:
                free_irq(device->pdev->irq, device);
                pci_disable_msi(device->pdev);
                break;
        case intx:
                free_irq(device->pdev->irq, device);
                break;
        case none:
                dev_warn(&device->pdev->dev,
                         "call to %s without interrupts setup\n", __func__);
        }
        device->irq_mode = none;
}

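/**
 * ioat_dma_probe - set up a discovered I/OAT device
 * @pdev: the device's PCI handle
 * @iobase: remapped base of the device's registers
 *
 * Allocates the descriptor and completion pools, enumerates the channels,
 * wires up the dmaengine operations for the detected hardware version,
 * sets up interrupts, runs the self-test, and registers with the
 * dmaengine core.
 */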
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
                                      void __iomem *iobase)
{
        int err;
        struct ioatdma_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                err = -ENOMEM;
                goto err_kzalloc;
        }
        device->pdev = pdev;
        device->reg_base = iobase;
        device->version = readb(device->reg_base + IOAT_VER_OFFSET);

        /* DMA coherent memory pool for DMA descriptor allocations */
        device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
                                           sizeof(struct ioat_dma_descriptor),
                                           64, 0);
        if (!device->dma_pool) {
                err = -ENOMEM;
                goto err_dma_pool;
        }

        device->completion_pool = pci_pool_create("completion_pool", pdev,
                                                  sizeof(u64), SMP_CACHE_BYTES,
                                                  SMP_CACHE_BYTES);
        if (!device->completion_pool) {
                err = -ENOMEM;
                goto err_completion_pool;
        }

        INIT_LIST_HEAD(&device->common.channels);
        ioat_dma_enumerate_channels(device);

        device->common.device_alloc_chan_resources =
                                                ioat_dma_alloc_chan_resources;
        device->common.device_free_chan_resources =
                                                ioat_dma_free_chan_resources;
        device->common.dev = &pdev->dev;

        dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
        device->common.device_is_tx_complete = ioat_dma_is_complete;
        device->common.device_dependency_added = ioat_dma_dependency_added;
        switch (device->version) {
        case IOAT_VER_1_2:
                device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
                device->common.device_issue_pending =
                                                ioat1_dma_memcpy_issue_pending;
                break;
        case IOAT_VER_2_0:
                device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
                device->common.device_issue_pending =
                                                ioat2_dma_memcpy_issue_pending;
                break;
        }

        dev_err(&device->pdev->dev,
                "Intel(R) I/OAT DMA Engine found,"
                " %d channels, device version 0x%02x, driver version %s\n",
                device->common.chancnt, device->version, IOAT_DMA_VERSION);

        err = ioat_dma_setup_interrupts(device);
        if (err)
                goto err_setup_interrupts;

        err = ioat_dma_self_test(device);
        if (err)
                goto err_self_test;

        dma_async_device_register(&device->common);

        return device;

err_self_test:
        ioat_dma_remove_interrupts(device);
err_setup_interrupts:
        pci_pool_destroy(device->completion_pool);
err_completion_pool:
        pci_pool_destroy(device->dma_pool);
err_dma_pool:
        kfree(device);
err_kzalloc:
        /* don't touch *device here; it may be NULL or already freed */
        dev_err(&pdev->dev,
                "Intel(R) I/OAT DMA Engine initialization failed\n");
        return NULL;
}

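/**
 * ioat_dma_remove - tear down everything ioat_dma_probe set up
 * @device: the device to be removed
 */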
void ioat_dma_remove(struct ioatdma_device *device)
{
        struct dma_chan *chan, *_chan;
        struct ioat_dma_chan *ioat_chan;

        ioat_dma_remove_interrupts(device);

        dma_async_device_unregister(&device->common);

        pci_pool_destroy(device->dma_pool);
        pci_pool_destroy(device->completion_pool);

        iounmap(device->reg_base);
        pci_release_regions(device->pdev);
        pci_disable_device(device->pdev);

        list_for_each_entry_safe(chan, _chan,
                                 &device->common.channels, device_node) {
                ioat_chan = to_ioat_chan(chan);
                list_del(&chan->device_node);
                kfree(ioat_chan);
        }
        kfree(device);
}
