/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void
ioat_init_channel(struct ioatdma_device *device, struct ioatdma_chan *ioat_chan,
		  int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->device = device;
	ioat_chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	device->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = device->timer_fn;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, device->cleanup_fn, data);
}

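/**
 * ioat_stop - quiesce a channel and drain any in-flight work
 * @ioat_chan: channel to quiesce
 *
 * Clears IOAT_RUN so the irq handlers and the cleanup tasklet can no
 * longer re-arm each other, flushes in-flight interrupts, timers, and
 * tasklet runs, then makes a final cleanup pass once nothing can race.
 */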
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *device = ioat_chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
}

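/**
 * ioat_get_current_completion - read the last completed descriptor address
 * @ioat_chan: channel to query
 *
 * Reads the channel's completion writeback area; if the hardware reports
 * a halted state, the channel error register is logged (actual recovery
 * remains a TODO below).
 */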
dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

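/**
 * ioat_cleanup_preamble - check whether the channel has made progress
 * @ioat_chan: channel to check
 * @phys_complete: out parameter for the current completion address
 *
 * Returns true and re-arms the completion timer only when the hardware
 * has advanced past the last observed completion, letting callers skip
 * cleanup when there is nothing new to reap.
 */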
bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

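/**
 * ioat_dma_tx_status - poll the completion status of a cookie
 * @c: channel handling the transaction
 * @cookie: transaction identifier
 * @txstate: if not %NULL, updated with detailed transaction state
 *
 * If the cookie is still pending, kick the cleanup path once and
 * re-check, so status polling also drives descriptor reclamation.
 */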
enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *device = ioat_chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		err = -ENOMEM;
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		err = -ENOMEM;
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 *
 * Tries MSI-X first (one vector per channel), then falls back to MSI,
 * then to a shared legacy INTx interrupt.
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		ioat_chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				ioat_chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

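/**
 * ioat_probe - common initialization for all hardware versions
 * @device: ioat device to initialize
 *
 * Creates the descriptor and completion DMA pools, enumerates channels,
 * wires up interrupts, and runs the device self-test; any failure
 * unwinds the partial setup through the error labels in reverse order.
 */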
Greg Kroah-Hartman4bf27b82012-12-21 15:09:59 -0800417int ioat_probe(struct ioatdma_device *device)
Chris Leech0bbd5f42006-05-23 17:35:34 -0700418{
Dan Williamsf2427e22009-07-28 14:42:38 -0700419 int err = -ENODEV;
420 struct dma_device *dma = &device->common;
421 struct pci_dev *pdev = device->pdev;
Dan Williamse6c0b692009-09-08 17:29:44 -0700422 struct device *dev = &pdev->dev;
Chris Leech0bbd5f42006-05-23 17:35:34 -0700423
424 /* DMA coherent memory pool for DMA descriptor allocations */
425 device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
Shannon Nelson8ab89562007-10-16 01:27:39 -0700426 sizeof(struct ioat_dma_descriptor),
427 64, 0);
Chris Leech0bbd5f42006-05-23 17:35:34 -0700428 if (!device->dma_pool) {
429 err = -ENOMEM;
430 goto err_dma_pool;
431 }
432
Shannon Nelson43d6e362007-10-16 01:27:39 -0700433 device->completion_pool = pci_pool_create("completion_pool", pdev,
434 sizeof(u64), SMP_CACHE_BYTES,
435 SMP_CACHE_BYTES);
Dan Williams5cbafa62009-08-26 13:01:44 -0700436
Chris Leech0bbd5f42006-05-23 17:35:34 -0700437 if (!device->completion_pool) {
438 err = -ENOMEM;
439 goto err_completion_pool;
440 }
441
Dan Williams5cbafa62009-08-26 13:01:44 -0700442 device->enumerate_channels(device);
Chris Leech0bbd5f42006-05-23 17:35:34 -0700443
Dan Williamsf2427e22009-07-28 14:42:38 -0700444 dma_cap_set(DMA_MEMCPY, dma->cap_mask);
Dan Williamsf2427e22009-07-28 14:42:38 -0700445 dma->dev = &pdev->dev;
Shannon Nelson7bb67c12007-11-14 16:59:51 -0800446
Dan Williamsbc3c7022009-07-28 14:33:42 -0700447 if (!dma->chancnt) {
Dan Williamsa6d52d72009-12-19 15:36:02 -0700448 dev_err(dev, "channel enumeration error\n");
Maciej Sosnowski8b794b12009-02-26 11:04:54 +0100449 goto err_setup_interrupts;
450 }
451
Shannon Nelson3e037452007-10-16 01:27:40 -0700452 err = ioat_dma_setup_interrupts(device);
Shannon Nelson8ab89562007-10-16 01:27:39 -0700453 if (err)
Shannon Nelson3e037452007-10-16 01:27:40 -0700454 goto err_setup_interrupts;
Shannon Nelson8ab89562007-10-16 01:27:39 -0700455
Dan Williams9de6fc72009-09-08 17:42:58 -0700456 err = device->self_test(device);
Chris Leech0bbd5f42006-05-23 17:35:34 -0700457 if (err)
458 goto err_self_test;
459
Dan Williamsf2427e22009-07-28 14:42:38 -0700460 return 0;
Chris Leech0bbd5f42006-05-23 17:35:34 -0700461
462err_self_test:
Dan Williamse6c0b692009-09-08 17:29:44 -0700463 ioat_disable_interrupts(device);
Shannon Nelson3e037452007-10-16 01:27:40 -0700464err_setup_interrupts:
Chris Leech0bbd5f42006-05-23 17:35:34 -0700465 pci_pool_destroy(device->completion_pool);
466err_completion_pool:
467 pci_pool_destroy(device->dma_pool);
468err_dma_pool:
Dan Williamsf2427e22009-07-28 14:42:38 -0700469 return err;
470}
471
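/**
 * ioat_register - hand the device to the dmaengine core
 * @device: fully initialized ioat device
 *
 * On registration failure, undoes the interrupt and pool setup
 * performed by ioat_probe().
 */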
int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

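/* per-channel sysfs attributes common to all hardware versions */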
static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

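/*
 * Generic ->show() dispatcher for the per-channel "quickdata" kobject:
 * routes sysfs reads to the ioat_sysfs_entry that owns the attribute.
 */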
static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioatdma_chan *ioat_chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&ioat_chan->dma_chan, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

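/*
 * Publish a "quickdata" kobject beneath each channel's device node; a
 * failure here is only warned about and flagged via IOAT_KOBJ_INIT_FAIL
 * so that teardown knows to skip the channel.
 */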
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&ioat_chan->kobj, type,
					   parent, "quickdata");
		if (err) {
			dev_warn(to_dev(ioat_chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&ioat_chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
		}
	}
}

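/* remove the per-channel sysfs objects added by ioat_kobject_add() */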
void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
			kobject_del(&ioat_chan->kobj);
			kobject_put(&ioat_chan->kobj);
		}
	}
}

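/**
 * ioat_dma_remove - tear down everything set up by ioat_probe()/ioat_register()
 * @device: ioat device being removed
 */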
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}