/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
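
/*
 * A usage sketch, assuming the module is named ioatdma as built from this
 * directory: the level can be set at load time ("modprobe ioatdma
 * ioat_pending_level=8") or, because the parameter permission is 0644,
 * tuned at runtime via /sys/module/ioatdma/parameters/ioat_pending_level.
 */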

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
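	/*
	 * Channel registers sit in consecutive 0x80-byte windows above the
	 * device-global registers, hence idx + 1: by this arithmetic,
	 * window 0 is the device's own register block.
	 */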
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	dma_cookie_init(&chan->common);
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}

void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
	switch (device->irq_mode) {
	case IOAT_MSIX:
		msix = &device->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}

dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

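	/*
	 * chan->completion points at a coherent writeback area that the
	 * hardware fills with the address of the last completed
	 * descriptor; the low bits carry channel status, which
	 * ioat_chansts_to_addr() masks off.
	 */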
	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;
	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

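	/*
	 * Not complete according to the book-keeping yet, so run the
	 * cleanup path once to reap anything the hardware has already
	 * finished, then ask again.
	 */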
	device->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

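	/*
	 * Honor the requested style first; on any failure each stage
	 * falls back to the next one down the chain: msix -> msi -> intx.
	 */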
	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	device->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

	device->irq_mode = IOAT_INTX;
done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	device->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);

	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show = ioat_attr_show,
};

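/*
 * Attaches a "quickdata" attribute directory beneath each channel's dma
 * class device. As an illustration (the exact path depends on device and
 * channel numbering), the files typically appear as
 * /sys/class/dma/dma0chan0/quickdata/{cap,version}.
 */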
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}