/*
 * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"

/**
 * struct tb_cm - Simple Thunderbolt connection manager
 * @tunnel_list: List of active tunnels
 * @hotplug_active: tb_handle_hotplug() will stop progressing plug
 *		    events and exit if this is not set (it needs to
 *		    acquire the lock one more time). Used to drain the
 *		    workqueue after the config channel has been paused.
 */
struct tb_cm {
	struct list_head tunnel_list;
	bool hotplug_active;
};
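
/*
 * The connection manager state lives in the domain's private data:
 * tb_domain_alloc() below reserves sizeof(struct tb_cm) for it, and
 * tb_priv() returns a pointer to this structure.
 */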

/* enumeration & hot plug handling */


static void tb_scan_port(struct tb_port *port);

/**
 * tb_scan_switch() - scan for and initialize downstream switches
 */
static void tb_scan_switch(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_scan_port(&sw->ports[i]);
}

/**
 * tb_scan_port() - check for and initialize switches below port
 */
static void tb_scan_port(struct tb_port *port)
{
	struct tb_switch *sw;
	if (tb_is_upstream_port(port))
		return;
	if (port->config.type != TB_TYPE_PORT)
		return;
	/*
	 * A downstream switch is reachable through two ports.
	 * Only scan on the primary port (link_nr == 0).
	 */
	if (port->dual_link_port && port->link_nr)
		return;
	if (tb_wait_for_port(port, false) <= 0)
		return;
	if (port->remote) {
		tb_port_WARN(port, "port already has a remote!\n");
		return;
	}
	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
			     tb_downstream_route(port));
	if (!sw)
		return;

	if (tb_switch_configure(sw)) {
		tb_switch_put(sw);
		return;
	}

	if (tb_switch_add(sw)) {
		tb_switch_put(sw);
		return;
	}

	port->remote = tb_upstream_port(sw);
	tb_upstream_port(sw)->remote = port;
	tb_scan_switch(sw);
}
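
/*
 * Switch lifecycle in tb_scan_port() above: tb_switch_alloc() creates
 * the device, tb_switch_configure() writes its initial configuration,
 * tb_switch_add() registers it with the driver core, and
 * tb_switch_put() drops the reference again on any failure.
 */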

/**
 * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
 */
static void tb_free_invalid_tunnels(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (tb_pci_is_invalid(tunnel)) {
			tb_pci_deactivate(tunnel);
			list_del(&tunnel->list);
			tb_pci_free(tunnel);
		}
	}
}

/**
 * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
 */
static void tb_free_unplugged_children(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		if (tb_is_upstream_port(port))
			continue;
		if (!port->remote)
			continue;
		if (port->remote->sw->is_unplugged) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_free_unplugged_children(port->remote->sw);
		}
	}
}


/**
 * tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
 */
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
	int i;
	for (i = 1; i <= sw->config.max_port_number; i++)
		if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
			return &sw->ports[i];
	return NULL;
}

/**
 * tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
 */
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
	int i;
	int cap;
	int res;
	int data;
	for (i = 1; i <= sw->config.max_port_number; i++) {
		if (tb_is_upstream_port(&sw->ports[i]))
			continue;
		if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
			continue;
		cap = tb_port_find_cap(&sw->ports[i], TB_PORT_CAP_ADAP);
		if (cap < 0)
			continue;
		res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
		if (res < 0)
			continue;
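		/*
		 * Bit 31 of the first adapter config dword marks the
		 * port as activated (see the matching check in
		 * tb_activate_pcie_devices() below); skip ports that
		 * are already in use.
		 */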
		if (data & 0x80000000)
			continue;
		return &sw->ports[i];
	}
	return NULL;
}

/**
 * tb_activate_pcie_devices() - scan for and activate PCIe devices
 *
 * This function is somewhat ad hoc. For now it only supports one device
 * per port and only devices at depth 1.
 */
static void tb_activate_pcie_devices(struct tb *tb)
{
	int i;
	int cap;
	u32 data;
	struct tb_switch *sw;
	struct tb_port *up_port;
	struct tb_port *down_port;
	struct tb_pci_tunnel *tunnel;
	struct tb_cm *tcm = tb_priv(tb);

	/* Scan for PCIe devices at depth 1. */
	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
			continue;
		if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
			continue;
		if (!tb->root_switch->ports[i].remote)
			continue;
		sw = tb->root_switch->ports[i].remote->sw;
		up_port = tb_find_pci_up_port(sw);
		if (!up_port) {
			tb_sw_info(sw, "no PCIe devices found, aborting\n");
			continue;
		}

		/* Check whether the port is already activated. */
		cap = tb_port_find_cap(up_port, TB_PORT_CAP_ADAP);
		if (cap < 0)
			continue;
		if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
			continue;
		if (data & 0x80000000) {
			tb_port_info(up_port,
				     "PCIe port already activated, aborting\n");
			continue;
		}

		down_port = tb_find_unused_down_port(tb->root_switch);
		if (!down_port) {
			tb_port_info(up_port,
				     "All PCIe down ports are occupied, aborting\n");
			continue;
		}
		tunnel = tb_pci_alloc(tb, up_port, down_port);
		if (!tunnel) {
			tb_port_info(up_port,
				     "PCIe tunnel allocation failed, aborting\n");
			continue;
		}

		if (tb_pci_activate(tunnel)) {
			tb_port_info(up_port,
				     "PCIe tunnel activation failed, aborting\n");
			tb_pci_free(tunnel);
			continue;
		}

		list_add(&tunnel->list, &tcm->tunnel_list);
	}
}
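
/*
 * The tunnels set up above run between a PCIe down port on the root
 * switch and the PCIe up port of a device at depth 1; tb_pci_alloc()
 * (tunnel_pci.c) builds the paths between the two ends.
 */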

/* hotplug handling */

struct tb_hotplug_event {
	struct work_struct work;
	struct tb *tb;
	u64 route;
	u8 port;
	bool unplug;
};
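
/*
 * One tb_hotplug_event is allocated per incoming plug event in
 * tb_handle_event() below and freed at the end of tb_handle_hotplug().
 */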

/**
 * tb_handle_hotplug() - handle hotplug event
 *
 * Executes on tb->wq.
 */
static void tb_handle_hotplug(struct work_struct *work)
{
	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
	struct tb *tb = ev->tb;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_switch *sw;
	struct tb_port *port;
	mutex_lock(&tb->lock);
	if (!tcm->hotplug_active)
		goto out; /* during init, suspend or shutdown */

	sw = get_switch_at_route(tb->root_switch, ev->route);
	if (!sw) {
		tb_warn(tb,
			"hotplug event from non-existent switch %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->port > sw->config.max_port_number) {
		tb_warn(tb,
			"hotplug event from non-existent port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	port = &sw->ports[ev->port];
	if (tb_is_upstream_port(port)) {
		tb_warn(tb,
			"hotplug event for upstream port %llx:%x (unplug: %d)\n",
			ev->route, ev->port, ev->unplug);
		goto out;
	}
	if (ev->unplug) {
		if (port->remote) {
			tb_port_info(port, "unplugged\n");
			tb_sw_set_unplugged(port->remote->sw);
			tb_free_invalid_tunnels(tb);
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else {
			tb_port_info(port,
				     "got unplug event for disconnected port, ignoring\n");
		}
	} else if (port->remote) {
		tb_port_info(port,
			     "got plug event for connected port, ignoring\n");
	} else {
		tb_port_info(port, "hotplug: scanning\n");
		tb_scan_port(port);
		if (!port->remote) {
			tb_port_info(port, "hotplug: no switch found\n");
		} else if (port->remote->sw->config.depth > 1) {
			tb_sw_warn(port->remote->sw,
				   "hotplug: chaining not supported\n");
		} else {
			tb_sw_info(port->remote->sw,
				   "hotplug: activating pcie devices\n");
			tb_activate_pcie_devices(tb);
		}
	}
out:
	mutex_unlock(&tb->lock);
	kfree(ev);
}

/**
 * tb_handle_event() - callback function for the control channel
 *
 * Delegates to tb_handle_hotplug.
 */
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
			    const void *buf, size_t size)
{
	const struct cfg_event_pkg *pkg = buf;
	struct tb_hotplug_event *ev;
	u64 route;

	if (type != TB_CFG_PKG_EVENT) {
		tb_warn(tb, "unexpected event %#x, ignoring\n", type);
		return;
	}

	route = tb_cfg_get_route(&pkg->header);

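	/*
	 * Ack the plug event on the control channel right away; the
	 * actual handling happens asynchronously on tb->wq.
	 */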
	if (tb_cfg_error(tb->ctl, route, pkg->port,
			 TB_CFG_ERROR_ACK_PLUG_EVENT)) {
		tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
			pkg->port);
	}

	ev = kmalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return;
	INIT_WORK(&ev->work, tb_handle_hotplug);
	ev->tb = tb;
	ev->route = route;
	ev->port = pkg->port;
	ev->unplug = pkg->unplug;
	queue_work(tb->wq, &ev->work);
}

static void tb_stop(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel;
	struct tb_pci_tunnel *n;

	/* tunnels are only present after everything has been initialized */
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		tb_pci_deactivate(tunnel);
		tb_pci_free(tunnel);
	}
	tb_switch_remove(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}

static int tb_start(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	int ret;

	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
	if (!tb->root_switch)
		return -ENOMEM;

	ret = tb_switch_configure(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Announce the switch to the world */
	ret = tb_switch_add(tb->root_switch);
	if (ret) {
		tb_switch_put(tb->root_switch);
		return ret;
	}

	/* Full scan to discover devices added before the driver was loaded. */
	tb_scan_switch(tb->root_switch);
	tb_activate_pcie_devices(tb);

	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	return 0;
}

static int tb_suspend_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);

	tb_info(tb, "suspending...\n");
	tb_switch_suspend(tb->root_switch);
	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
	tb_info(tb, "suspend finished\n");

	return 0;
}

static int tb_resume_noirq(struct tb *tb)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_pci_tunnel *tunnel, *n;

	tb_info(tb, "resuming...\n");

	/* Remove any PCIe devices the firmware might have set up. */
	tb_switch_reset(tb, 0);

	tb_switch_resume(tb->root_switch);
	tb_free_invalid_tunnels(tb);
	tb_free_unplugged_children(tb->root_switch);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
		tb_pci_restart(tunnel);
	if (!list_empty(&tcm->tunnel_list)) {
		/*
		 * The PCIe links need some time to come up after the
		 * tunnels are restarted; 100 ms has proven sufficient
		 * in practice.
		 */
		tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
		msleep(100);
	}
	/* Allow tb_handle_hotplug to progress events */
	tcm->hotplug_active = true;
	tb_info(tb, "resume finished\n");

	return 0;
}

static const struct tb_cm_ops tb_cm_ops = {
	.start = tb_start,
	.stop = tb_stop,
	.suspend_noirq = tb_suspend_noirq,
	.resume_noirq = tb_resume_noirq,
	.handle_event = tb_handle_event,
};

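/**
 * tb_probe() - set up a domain that uses this native connection manager
 *
 * Allocates a Thunderbolt domain for @nhi with room for struct tb_cm as
 * its private data and wires up tb_cm_ops. The domain still has to be
 * started by the caller (presumably via tb_domain_add() in the NHI
 * driver) before any of the ops above run.
 */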
struct tb *tb_probe(struct tb_nhi *nhi)
{
	struct tb_cm *tcm;
	struct tb *tb;

	tb = tb_domain_alloc(nhi, sizeof(*tcm));
	if (!tb)
		return NULL;

	tb->cm_ops = &tb_cm_ops;

	tcm = tb_priv(tb);
	INIT_LIST_HEAD(&tcm->tunnel_list);

	return tb;
}