blob: b57100a2c83441504d2f6cf1a49a9a62c4f7342c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This file contains work-arounds for x86 and x86_64 platform bugs.
3 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004#include <linux/pci.h>
5#include <linux/irq.h>
6
Venki Pallipadid54bd572007-10-12 23:04:23 +02007#include <asm/hpet.h>
8
Linus Torvalds1da177e2005-04-16 15:20:36 -07009#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
10
Greg Kroah-Hartmana18e3692012-12-21 14:02:53 -080011static void quirk_intel_irqbalance(struct pci_dev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070012{
Sergei Shtylyov38175052011-07-11 19:01:38 +040013 u8 config;
Matthew Wilcox9585ca02008-02-10 23:18:15 -050014 u16 word;
Linus Torvalds1da177e2005-04-16 15:20:36 -070015
16 /* BIOS may enable hardware IRQ balancing for
17 * E7520/E7320/E7525(revision ID 0x9 and below)
18 * based platforms.
19 * Disable SW irqbalance/affinity on those platforms.
20 */
Sergei Shtylyov38175052011-07-11 19:01:38 +040021 if (dev->revision > 0x9)
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 return;
23
Andrew Mortona86f34b2007-05-02 19:27:04 +020024 /* enable access to config space*/
25 pci_read_config_byte(dev, 0xf4, &config);
26 pci_write_config_byte(dev, 0xf4, config|0x2);
Linus Torvalds1da177e2005-04-16 15:20:36 -070027
Matthew Wilcox9585ca02008-02-10 23:18:15 -050028 /*
29 * read xTPR register. We may not have a pci_dev for device 8
30 * because it might be hidden until the above write.
31 */
32 pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);
Linus Torvalds1da177e2005-04-16 15:20:36 -070033
34 if (!(word & (1 << 13))) {
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -070035 dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
36 "disabling irq balancing and affinity\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070037 noirqdebug_setup("");
38#ifdef CONFIG_PROC_FS
39 no_irq_affinity = 1;
40#endif
41 }
42
Andrew Mortona86f34b2007-05-02 19:27:04 +020043 /* put back the original value for config space*/
Alan Coxda9bb1d2006-01-18 17:44:13 -080044 if (!(config & 0x2))
Andrew Mortona86f34b2007-05-02 19:27:04 +020045 pci_write_config_byte(dev, 0xf4, config);
Linus Torvalds1da177e2005-04-16 15:20:36 -070046}
Thomas Gleixner76492232007-10-19 20:35:02 +020047DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
48 quirk_intel_irqbalance);
49DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
50 quirk_intel_irqbalance);
51DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
52 quirk_intel_irqbalance);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#endif
Venki Pallipadid54bd572007-10-12 23:04:23 +020054
#if defined(CONFIG_HPET_TIMER)
/* Physical address at which a quirk force-enabled the HPET (0 if none). */
unsigned long force_hpet_address;

/*
 * Which chipset-specific handler force_hpet_resume() must run after a
 * suspend cycle; armed by the force-enable quirk that succeeded.
 */
static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

/* Mapping of the ICH Root Complex Base Address (RCBA) register space. */
static void __iomem *rcba_base;
68
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +020069static void ich_force_hpet_resume(void)
Venki Pallipadid54bd572007-10-12 23:04:23 +020070{
71 u32 val;
72
73 if (!force_hpet_address)
74 return;
75
Stoyan Gaydarov8c5dfd22009-03-10 00:10:32 -050076 BUG_ON(rcba_base == NULL);
Venki Pallipadid54bd572007-10-12 23:04:23 +020077
78 /* read the Function Disable register, dword mode only */
79 val = readl(rcba_base + 0x3404);
80 if (!(val & 0x80)) {
81 /* HPET disabled in HPTC. Trying to enable */
82 writel(val | 0x80, rcba_base + 0x3404);
83 }
84
85 val = readl(rcba_base + 0x3404);
86 if (!(val & 0x80))
87 BUG();
88 else
89 printk(KERN_DEBUG "Force enabled HPET at resume\n");
90
91 return;
92}
93
94static void ich_force_enable_hpet(struct pci_dev *dev)
95{
96 u32 val;
97 u32 uninitialized_var(rcba);
98 int err = 0;
99
100 if (hpet_address || force_hpet_address)
101 return;
102
103 pci_read_config_dword(dev, 0xF0, &rcba);
104 rcba &= 0xFFFFC000;
105 if (rcba == 0) {
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700106 dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
107 "cannot force enable HPET\n");
Venki Pallipadid54bd572007-10-12 23:04:23 +0200108 return;
109 }
110
111 /* use bits 31:14, 16 kB aligned */
112 rcba_base = ioremap_nocache(rcba, 0x4000);
113 if (rcba_base == NULL) {
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700114 dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
115 "cannot force enable HPET\n");
Venki Pallipadid54bd572007-10-12 23:04:23 +0200116 return;
117 }
118
119 /* read the Function Disable register, dword mode only */
120 val = readl(rcba_base + 0x3404);
121
122 if (val & 0x80) {
123 /* HPET is enabled in HPTC. Just not reported by BIOS */
124 val = val & 0x3;
125 force_hpet_address = 0xFED00000 | (val << 12);
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700126 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
127 "0x%lx\n", force_hpet_address);
Venki Pallipadid54bd572007-10-12 23:04:23 +0200128 iounmap(rcba_base);
129 return;
130 }
131
132 /* HPET disabled in HPTC. Trying to enable */
133 writel(val | 0x80, rcba_base + 0x3404);
134
135 val = readl(rcba_base + 0x3404);
136 if (!(val & 0x80)) {
137 err = 1;
138 } else {
139 val = val & 0x3;
140 force_hpet_address = 0xFED00000 | (val << 12);
141 }
142
143 if (err) {
144 force_hpet_address = 0;
145 iounmap(rcba_base);
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700146 dev_printk(KERN_DEBUG, &dev->dev,
147 "Failed to force enable HPET\n");
Venki Pallipadid54bd572007-10-12 23:04:23 +0200148 } else {
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200149 force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700150 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
151 "0x%lx\n", force_hpet_address);
Venki Pallipadid54bd572007-10-12 23:04:23 +0200152 }
153}
154
155DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
Thomas Gleixner76492232007-10-19 20:35:02 +0200156 ich_force_enable_hpet);
Krzysztof Oledzki74e411c2008-06-04 03:40:17 +0200157DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
158 ich_force_enable_hpet);
Venki Pallipadid54bd572007-10-12 23:04:23 +0200159DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
Thomas Gleixner76492232007-10-19 20:35:02 +0200160 ich_force_enable_hpet);
Venki Pallipadied6fb172007-10-12 23:04:24 +0200161DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
Thomas Gleixner76492232007-10-19 20:35:02 +0200162 ich_force_enable_hpet);
Venki Pallipadid54bd572007-10-12 23:04:23 +0200163DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
Thomas Gleixner76492232007-10-19 20:35:02 +0200164 ich_force_enable_hpet);
Venki Pallipadid54bd572007-10-12 23:04:23 +0200165DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
Thomas Gleixner76492232007-10-19 20:35:02 +0200166 ich_force_enable_hpet);
Venki Pallipadid54bd572007-10-12 23:04:23 +0200167DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
Thomas Gleixner76492232007-10-19 20:35:02 +0200168 ich_force_enable_hpet);
Janne Kulmalabacbe992008-12-16 13:39:57 +0200169DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
170 ich_force_enable_hpet);
Alistair John Strachandff244a2008-01-30 13:33:39 +0100171DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
172 ich_force_enable_hpet);
Andi Kleen42bb8cc2009-01-09 12:17:40 -0800173DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16, /* ICH10 */
174 ich_force_enable_hpet);
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200175
/* Device cached by a force-enable quirk for use by its resume handler. */
static struct pci_dev *cached_dev;

/* Hint that the user may force-enable the HPET with hpet=force. */
static void hpet_print_force_info(void)
{
	printk(KERN_INFO "HPET not enabled in BIOS. "
	       "You might try hpet=force boot option\n");
}
183
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200184static void old_ich_force_hpet_resume(void)
185{
186 u32 val;
187 u32 uninitialized_var(gen_cntl);
188
189 if (!force_hpet_address || !cached_dev)
190 return;
191
192 pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
193 gen_cntl &= (~(0x7 << 15));
194 gen_cntl |= (0x4 << 15);
195
196 pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
197 pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
198 val = gen_cntl >> 15;
199 val &= 0x7;
200 if (val == 0x4)
201 printk(KERN_DEBUG "Force enabled HPET at resume\n");
202 else
203 BUG();
204}
205
206static void old_ich_force_enable_hpet(struct pci_dev *dev)
207{
208 u32 val;
209 u32 uninitialized_var(gen_cntl);
210
211 if (hpet_address || force_hpet_address)
212 return;
213
214 pci_read_config_dword(dev, 0xD0, &gen_cntl);
215 /*
216 * Bit 17 is HPET enable bit.
217 * Bit 16:15 control the HPET base address.
218 */
219 val = gen_cntl >> 15;
220 val &= 0x7;
221 if (val & 0x4) {
222 val &= 0x3;
223 force_hpet_address = 0xFED00000 | (val << 12);
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700224 dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
225 force_hpet_address);
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200226 return;
227 }
228
229 /*
230 * HPET is disabled. Trying enabling at FED00000 and check
231 * whether it sticks
232 */
233 gen_cntl &= (~(0x7 << 15));
234 gen_cntl |= (0x4 << 15);
235 pci_write_config_dword(dev, 0xD0, gen_cntl);
236
237 pci_read_config_dword(dev, 0xD0, &gen_cntl);
238
239 val = gen_cntl >> 15;
240 val &= 0x7;
241 if (val & 0x4) {
242 /* HPET is enabled in HPTC. Just not reported by BIOS */
243 val &= 0x3;
244 force_hpet_address = 0xFED00000 | (val << 12);
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700245 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
246 "0x%lx\n", force_hpet_address);
Venki Pallipadi32a2da62007-10-12 23:04:24 +0200247 cached_dev = dev;
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200248 force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
249 return;
250 }
251
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700252 dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200253}
254
/*
 * Undocumented chipset features. Make sure that the user enforced
 * this.
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
	/* Only act when the user passed hpet=force on the command line. */
	if (hpet_force_user)
		old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200279
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200280
281static void vt8237_force_hpet_resume(void)
282{
283 u32 val;
284
285 if (!force_hpet_address || !cached_dev)
286 return;
287
288 val = 0xfed00000 | 0x80;
289 pci_write_config_dword(cached_dev, 0x68, val);
290
291 pci_read_config_dword(cached_dev, 0x68, &val);
292 if (val & 0x80)
293 printk(KERN_DEBUG "Force enabled HPET at resume\n");
294 else
295 BUG();
296}
297
298static void vt8237_force_enable_hpet(struct pci_dev *dev)
299{
300 u32 uninitialized_var(val);
301
Thomas Gleixner7c4728f2008-05-10 21:42:14 +0200302 if (hpet_address || force_hpet_address)
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200303 return;
304
Thomas Gleixner7c4728f2008-05-10 21:42:14 +0200305 if (!hpet_force_user) {
306 hpet_print_force_info();
307 return;
308 }
309
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200310 pci_read_config_dword(dev, 0x68, &val);
311 /*
312 * Bit 7 is HPET enable bit.
313 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
314 */
315 if (val & 0x80) {
316 force_hpet_address = (val & ~0x3ff);
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700317 dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
318 force_hpet_address);
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200319 return;
320 }
321
322 /*
323 * HPET is disabled. Trying enabling at FED00000 and check
324 * whether it sticks
325 */
326 val = 0xfed00000 | 0x80;
327 pci_write_config_dword(dev, 0x68, val);
328
329 pci_read_config_dword(dev, 0x68, &val);
330 if (val & 0x80) {
331 force_hpet_address = (val & ~0x3ff);
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700332 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
333 "0x%lx\n", force_hpet_address);
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200334 cached_dev = dev;
335 force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
336 return;
337 }
338
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700339 dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200340}
341
342DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
343 vt8237_force_enable_hpet);
344DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
345 vt8237_force_enable_hpet);
Udo van den Heuvel892df7f2010-09-14 07:15:08 +0200346DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
347 vt8237_force_enable_hpet);
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200348
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200349static void ati_force_hpet_resume(void)
350{
351 pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
352 printk(KERN_DEBUG "Force enabled HPET at resume\n");
353}
354
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200355static u32 ati_ixp4x0_rev(struct pci_dev *dev)
356{
Borislav Petkov73f46042013-03-04 21:16:20 +0100357 int err = 0;
358 u32 d = 0;
359 u8 b = 0;
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200360
Borislav Petkov73f46042013-03-04 21:16:20 +0100361 err = pci_read_config_byte(dev, 0xac, &b);
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200362 b &= ~(1<<5);
Borislav Petkov73f46042013-03-04 21:16:20 +0100363 err |= pci_write_config_byte(dev, 0xac, b);
364 err |= pci_read_config_dword(dev, 0x70, &d);
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200365 d |= 1<<8;
Borislav Petkov73f46042013-03-04 21:16:20 +0100366 err |= pci_write_config_dword(dev, 0x70, d);
367 err |= pci_read_config_dword(dev, 0x8, &d);
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200368 d &= 0xff;
369 dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
Borislav Petkov73f46042013-03-04 21:16:20 +0100370
371 WARN_ON_ONCE(err);
372
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200373 return d;
374}
375
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200376static void ati_force_enable_hpet(struct pci_dev *dev)
377{
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200378 u32 d, val;
379 u8 b;
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200380
Thomas Gleixner7c4728f2008-05-10 21:42:14 +0200381 if (hpet_address || force_hpet_address)
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200382 return;
383
Thomas Gleixner7c4728f2008-05-10 21:42:14 +0200384 if (!hpet_force_user) {
385 hpet_print_force_info();
386 return;
387 }
388
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200389 d = ati_ixp4x0_rev(dev);
390 if (d < 0x82)
391 return;
392
393 /* base address */
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200394 pci_write_config_dword(dev, 0x14, 0xfed00000);
395 pci_read_config_dword(dev, 0x14, &val);
Andreas Herrmanne7250b82008-09-05 18:33:26 +0200396
397 /* enable interrupt */
398 outb(0x72, 0xcd6); b = inb(0xcd7);
399 b |= 0x1;
400 outb(0x72, 0xcd6); outb(b, 0xcd7);
401 outb(0x72, 0xcd6); b = inb(0xcd7);
402 if (!(b & 0x1))
403 return;
404 pci_read_config_dword(dev, 0x64, &d);
405 d |= (1<<10);
406 pci_write_config_dword(dev, 0x64, d);
407 pci_read_config_dword(dev, 0x64, &d);
408 if (!(d & (1<<10)))
409 return;
410
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200411 force_hpet_address = val;
412 force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
413 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
414 force_hpet_address);
415 cached_dev = dev;
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200416}
417DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
418 ati_force_enable_hpet);
419
Carlos Corbachod79a5f82007-10-19 18:51:27 +0100420/*
421 * Undocumented chipset feature taken from LinuxBIOS.
422 */
423static void nvidia_force_hpet_resume(void)
424{
425 pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
426 printk(KERN_DEBUG "Force enabled HPET at resume\n");
427}
428
429static void nvidia_force_enable_hpet(struct pci_dev *dev)
430{
431 u32 uninitialized_var(val);
432
Thomas Gleixner7c4728f2008-05-10 21:42:14 +0200433 if (hpet_address || force_hpet_address)
Carlos Corbachod79a5f82007-10-19 18:51:27 +0100434 return;
435
Thomas Gleixner7c4728f2008-05-10 21:42:14 +0200436 if (!hpet_force_user) {
437 hpet_print_force_info();
438 return;
439 }
440
Carlos Corbachod79a5f82007-10-19 18:51:27 +0100441 pci_write_config_dword(dev, 0x44, 0xfed00001);
442 pci_read_config_dword(dev, 0x44, &val);
443 force_hpet_address = val & 0xfffffffe;
444 force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
bjorn.helgaas@hp.com9ed88552007-12-17 14:09:40 -0700445 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
Carlos Corbachod79a5f82007-10-19 18:51:27 +0100446 force_hpet_address);
447 cached_dev = dev;
448 return;
449}
450
451/* ISA Bridges */
452DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
453 nvidia_force_enable_hpet);
454DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
455 nvidia_force_enable_hpet);
Udo A. Steinbergb1968842007-10-19 20:35:02 +0200456
/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			 nvidia_force_enable_hpet);
476
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200477void force_hpet_resume(void)
478{
479 switch (force_hpet_resume_type) {
Harvey Harrison4a5a77d2008-02-06 22:39:44 +0100480 case ICH_FORCE_HPET_RESUME:
481 ich_force_hpet_resume();
482 return;
483 case OLD_ICH_FORCE_HPET_RESUME:
484 old_ich_force_hpet_resume();
485 return;
486 case VT8237_FORCE_HPET_RESUME:
487 vt8237_force_hpet_resume();
488 return;
489 case NVIDIA_FORCE_HPET_RESUME:
490 nvidia_force_hpet_resume();
491 return;
Andreas Herrmanne8aa4662008-05-09 11:49:11 +0200492 case ATI_FORCE_HPET_RESUME:
493 ati_force_hpet_resume();
494 return;
Harvey Harrison4a5a77d2008-02-06 22:39:44 +0100495 default:
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200496 break;
497 }
498}
Pallipadi, Venkatesh73472a42010-01-21 11:09:52 -0800499
500/*
Peter Neubauer2e151c72014-09-12 13:06:13 +0200501 * According to the datasheet e6xx systems have the HPET hardwired to
502 * 0xfed00000
503 */
504static void e6xx_force_enable_hpet(struct pci_dev *dev)
505{
506 if (hpet_address || force_hpet_address)
507 return;
508
509 force_hpet_address = 0xFED00000;
510 force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
511 dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
512 "0x%lx\n", force_hpet_address);
513 return;
514}
515DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
516 e6xx_force_enable_hpet);
517
/*
 * HPET MSI on some boards (ATI SB700/SB800) has side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	/* Global flag consumed by the HPET driver; device is irrelevant. */
	hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);
532
Andreas Herrmann9b94b3a2009-04-17 12:07:46 +0200533#endif
Venki Pallipadibfe0c1c2007-10-12 23:04:24 +0200534
Andreas Herrmann9b94b3a2009-04-17 12:07:46 +0200535#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
536/* Set correct numa_node information for AMD NB functions */
Greg Kroah-Hartmana18e3692012-12-21 14:02:53 -0800537static void quirk_amd_nb_node(struct pci_dev *dev)
Andreas Herrmann9b94b3a2009-04-17 12:07:46 +0200538{
539 struct pci_dev *nb_ht;
540 unsigned int devfn;
Prarit Bhargava303fc082009-11-12 13:09:31 -0500541 u32 node;
Andreas Herrmann9b94b3a2009-04-17 12:07:46 +0200542 u32 val;
543
544 devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
545 nb_ht = pci_get_slot(dev->bus, devfn);
546 if (!nb_ht)
547 return;
548
549 pci_read_config_dword(nb_ht, 0x60, &val);
Daniel J Blueman847d7972014-03-13 19:43:01 +0800550 node = pcibus_to_node(dev->bus) | (val & 7);
Prarit Bhargava303fc082009-11-12 13:09:31 -0500551 /*
552 * Some hardware may return an invalid node ID,
553 * so check it first:
554 */
555 if (node_online(node))
556 set_dev_node(&dev->dev, node);
Jiri Slaby748df9a2009-09-08 12:16:18 +0200557 pci_dev_put(nb_ht);
Andreas Herrmann9b94b3a2009-04-17 12:07:46 +0200558}
559
560DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
561 quirk_amd_nb_node);
562DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
563 quirk_amd_nb_node);
564DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
565 quirk_amd_nb_node);
566DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
567 quirk_amd_nb_node);
568DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
569 quirk_amd_nb_node);
570DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
571 quirk_amd_nb_node);
572DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
573 quirk_amd_nb_node);
574DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
575 quirk_amd_nb_node);
576DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
577 quirk_amd_nb_node);
Andreas Herrmannf62ef5f2011-12-02 08:21:43 +0100578DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
579 quirk_amd_nb_node);
580DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
581 quirk_amd_nb_node);
582DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
583 quirk_amd_nb_node);
584DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
585 quirk_amd_nb_node);
586DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
587 quirk_amd_nb_node);
588DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
589 quirk_amd_nb_node);
590
Venki Pallipadid54bd572007-10-12 23:04:23 +0200591#endif
Aravind Gopalakrishnanfb53a1a2014-01-23 16:13:32 -0600592
593#ifdef CONFIG_PCI
594/*
595 * Processor does not ensure DRAM scrub read/write sequence
596 * is atomic wrt accesses to CC6 save state area. Therefore
597 * if a concurrent scrub read/write access is to same address
598 * the entry may appear as if it is not written. This quirk
599 * applies to Fam16h models 00h-0Fh
600 *
601 * See "Revision Guide" for AMD F16h models 00h-0fh,
602 * document 51810 rev. 3.04, Nov 2013
603 */
604static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
605{
606 u32 val;
607
608 /*
609 * Suggested workaround:
610 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
611 */
612 pci_read_config_dword(dev, 0x58, &val);
613 if (val & 0x1F) {
614 val &= ~(0x1F);
615 pci_write_config_dword(dev, 0x58, val);
616 }
617
618 pci_read_config_dword(dev, 0x5C, &val);
619 if (val & BIT(0)) {
620 val &= ~BIT(0);
621 pci_write_config_dword(dev, 0x5c, val);
622 }
623}
624
625DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
626 amd_disable_seq_and_redirect_scrub);
627
Tony Luck3637efb2016-09-01 11:39:33 -0700628#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
629#include <linux/jump_label.h>
630#include <asm/string_64.h>
631
632/* Ivy Bridge, Haswell, Broadwell */
633static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
634{
635 u32 capid0;
636
637 pci_read_config_dword(pdev, 0x84, &capid0);
638
639 if (capid0 & 0x10)
640 static_branch_inc(&mcsafe_key);
641}
642
643/* Skylake */
644static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
645{
Tony Lucke7905a72018-05-25 14:42:09 -0700646 u32 capid0, capid5;
Tony Luck3637efb2016-09-01 11:39:33 -0700647
648 pci_read_config_dword(pdev, 0x84, &capid0);
Tony Lucke7905a72018-05-25 14:42:09 -0700649 pci_read_config_dword(pdev, 0x98, &capid5);
Tony Luck3637efb2016-09-01 11:39:33 -0700650
Tony Lucke7905a72018-05-25 14:42:09 -0700651 /*
652 * CAPID0{7:6} indicate whether this is an advanced RAS SKU
653 * CAPID5{8:5} indicate that various NVDIMM usage modes are
654 * enabled, so memory machine check recovery is also enabled.
655 */
656 if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
Tony Luck3637efb2016-09-01 11:39:33 -0700657 static_branch_inc(&mcsafe_key);
Tony Lucke7905a72018-05-25 14:42:09 -0700658
Tony Luck3637efb2016-09-01 11:39:33 -0700659}
660DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
661DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
662DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
663DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
664#endif
Arnd Bergmannd320b9a2016-10-24 17:33:18 +0200665#endif