/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")


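/*
 * Each ring owns one bit in the interrupt mask/status registers: bits
 * 0..hop_count-1 belong to the TX rings and the following hop_count bits to
 * the RX rings (see ring_interrupt_index() below).
 */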
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;
	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

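/*
 * Layout of each ring's 16-byte descriptor register block, as used by the
 * accessors below and by ring_start()/ring_stop():
 *   +0  physical address of the descriptor array (64 bit)
 *   +8  producer index (bits 31:16) and consumer index (bits 15:0)
 *   +12 ring size (for RX rings the frame size occupies the upper 16 bits)
 */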
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to them
	 * are ignored by the hardware so we can save one ioread32() by
	 * filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

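/*
 * ring->head is the index of the next descriptor to be posted (producer
 * side), ring->tail the index of the next descriptor expected to complete
 * (consumer side). One slot is left unused so that a full ring can be
 * distinguished from an empty one.
 */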
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
			if (frame->sof != 0)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected SOF: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->sof);
			/*
			 * known flags:
			 * raw not enabled, interrupt not set: 0x2=0010
			 * raw enabled: 0xa=1010
			 * raw not enabled: 0xb=1011
			 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
			 */
			if (frame->flags != 0xa)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected flags: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->flags);
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}

static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
	return ring_alloc(nhi, hop, size, true);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
	return ring_alloc(nhi, hop, size, false);
}

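/*
 * Rough sketch of the intended ring life cycle (illustrative only; error
 * handling and the setup of struct ring_frame are omitted, and frames are
 * submitted via the ring_rx()/ring_tx() helpers declared in nhi.h, which
 * feed __ring_enqueue()):
 *
 *	ring = ring_alloc_rx(nhi, hop, size);
 *	ring_start(ring);
 *	ring_rx(ring, &frame);	// frame->callback() runs once it completes
 *	...
 *	ring_stop(ring);	// cancels frames still queued or in flight
 *	ring_free(ring);
 */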
/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	} else {
		ring_iowrite32desc(ring,
				   (TB_FRAME_SIZE << 16) | ring->size, 12);
		ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}


/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until ring_start() has been called again.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;


	dev_info(&ring->nhi->pdev->dev,
		 "freeing %s %d\n",
		 RING_TYPE(ring),
		 ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only by
	 * nhi_interrupt_work and ring_stop). Wait for it to finish before
	 * freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}

static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}

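/*
 * The MSI handler below only schedules interrupt_work: nhi->lock is a mutex,
 * so the status registers are read (and thereby cleared) from process
 * context in nhi_interrupt_work() rather than in hard irq context.
 */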
static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	thunderbolt_suspend(tb);
	return 0;
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	thunderbolt_resume(tb);
	return 0;
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
	flush_work(&nhi->interrupt_work);
	mutex_destroy(&nhi->lock);
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pci_enable_msi(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);
	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	nhi_disable_interrupts(nhi); /* In case someone left them on. */
	res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
			       IRQF_NO_SUSPEND, /* must work during _noirq */
			       "thunderbolt", nhi);
	if (res) {
		dev_err(&pdev->dev, "request_irq failed, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	/* magic value - clock related? */
	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
	tb = thunderbolt_alloc_and_start(nhi);
	if (!tb) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	thunderbolt_shutdown_and_free(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.thaw_noirq = nhi_resume_noirq,
	.restore_noirq = nhi_resume_noirq,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

static int __init nhi_init(void)
{
	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return -ENOSYS;
	return pci_register_driver(&nhi_driver);
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
}

module_init(nhi_init);
module_exit(nhi_unload);