/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "ioatdma.h"
#include "ioatdma_registers.h"

/*
 * Bit 7 of a tag map entry is the "valid" bit; if it is set, bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the
 * valid bit is not set, the value must be 0 or 1 and defines the bit in
 * the tag directly.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF

/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device.  Software needs direct support for their tag mappings.
 */

#define APICID_BIT(x) (DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN 8

static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
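
/*
 * Illustrative decode of a legacy map (derived from the definitions above,
 * not from a datasheet): in ioat_tag_map_BNB, entry 0 is the literal 1, so
 * tag bit 0 is always set; entries 1-3 are APICID_BIT(1), APICID_BIT(2),
 * APICID_BIT(2), so tag bits 1-3 are copied from bits 1, 2 and 2 of the
 * target CPU's APIC ID.  For APIC ID 0x6 this yields tag 0x0f.
 */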

/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}

static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_err(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

static int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}

struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	 req_slots[0];
};

/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]	PCI-Express Bus Number
 * [7:3]	PCI-Express Device Number
 * [2:0]	PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]	Reserved (0)
 * [0]		Ignore Function Number
 */

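/*
 * ioat_dca_add_requester - program a requester's bus/device/function into a
 * free slot of the chipset's requester ID table so that its memory traffic
 * can be tagged for direct cache access.
 */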
static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

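/*
 * Build the DCA tag for @cpu: each tag bit is either copied from a bit of
 * the CPU's APIC ID (entries with DCA_TAG_MAP_VALID set) or taken as a
 * literal 0/1, as described by the tag_map copied in at init time.
 */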
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA_TAG_MAP_VALID) {
			bit = entry & ~DCA_TAG_MAP_VALID;
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else {
			value = entry ? 1 : 0;
		}
		tag |= (value << i);
	}
	return tag;
}

static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}

static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};


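/*
 * ioat_dca_init - set up a DCA provider for "legacy" I/OAT devices whose
 * chipsets lack a DCA register set.  The APIC-ID-to-tag mapping comes from
 * the static tables above, selected by PCI vendor/device ID.
 */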
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}


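/*
 * I/OAT v2 and later implement a DCA register block in the device itself.
 * Requesters are programmed through a global requester table whose offset
 * is read from that block (IOAT_DCA_GREQID_OFFSET).
 */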
static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat2_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

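/*
 * The v2 tag is derived from the v1 computation: the result is inverted
 * and only the low 5 bits are kept.
 */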
static u8 ioat2_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	tag = ioat_dca_get_tag(dca, dev, cpu);
	tag = (~tag) & 0x1F;
	return tag;
}

static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

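/*
 * Count requester slots by walking the global requester table until an
 * entry with IOAT_DCA_GREQID_LASTID set is found.  A zero table offset
 * means the device exposes no slots.
 */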
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;
	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

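/*
 * ioat2_dca_init - set up a DCA provider for an I/OAT v2 device.  The tag
 * map and requester table live behind the DCA register block whose offset
 * is read from IOAT_DCAOFFSET_OFFSET in the device's MMIO space.
 */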
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}


	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	for (i = 0; i < 5; i++) {
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}

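/*
 * I/OAT v3 uses a different global requester table offset
 * (IOAT3_DCA_GREQID_OFFSET) and a richer tag map encoding: an entry can
 * select an APIC ID bit, select and invert it, or supply a literal value.
 */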
static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			     readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}

static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			     readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}

static u8 ioat3_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
			     ~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}

static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};

static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}

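/*
 * ioat3_dca_init - set up a DCA provider for an I/OAT v3 device.  The
 * APIC-ID-to-tag map is read as a 64-bit value split across two 32-bit
 * registers and stored one byte per tag bit, masked with DCA_TAG_MAP_MASK.
 */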
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}
	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}


	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}