/*
 * Thunderbolt Cactus Ridge driver - eeprom access
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "tb.h"

/**
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

/**
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

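/*
 * The eeprom is not memory mapped. All access goes through a single eeprom
 * control word at dword offset 4 of the plug events capability
 * (sw->cap_plug_events + 4). The helpers below bit-bang the serial bus by
 * toggling the clock, data_out/data_in and access_low/access_high fields of
 * struct tb_eeprom_ctl; the meaning of those fields is inferred from how
 * they are driven here rather than from public documentation.
 */
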
enum tb_eeprom_transfer {
	TB_EEPROM_IN,
	TB_EEPROM_OUT,
};

/**
 * tb_eeprom_active - enable rom access
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
 * fail to reprobe.
 */
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
	struct tb_eeprom_ctl ctl;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	if (enable) {
		ctl.access_high = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_low = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	} else {
		ctl.access_low = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_high = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	}
}

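/*
 * tb_eeprom_transfer() below performs one clock cycle: an outgoing bit is
 * placed on data_out before the clock is raised, an incoming bit is sampled
 * from data_in while the clock is high, and the clock is lowered again
 * afterwards. This matches how a bit-banged SPI bus (mode 0) would be
 * driven, which is presumably what sits behind the control word.
 */
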
/**
 * tb_eeprom_transfer - transfer one bit
 *
 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
 * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
 */
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
			      enum tb_eeprom_transfer direction)
{
	int res;
	if (direction == TB_EEPROM_OUT) {
		res = tb_eeprom_ctl_write(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 1;
	res = tb_eeprom_ctl_write(sw, ctl);
	if (res)
		return res;
	if (direction == TB_EEPROM_IN) {
		res = tb_eeprom_ctl_read(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 0;
	return tb_eeprom_ctl_write(sw, ctl);
}

/**
 * tb_eeprom_out - write one byte to the bus
 */
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	for (i = 0; i < 8; i++) {
		ctl.data_out = val & 0x80;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
		if (res)
			return res;
		val <<= 1;
	}
	return 0;
}

/**
 * tb_eeprom_in - read one byte from the bus
 */
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	*val = 0;
	for (i = 0; i < 8; i++) {
		*val <<= 1;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
		if (res)
			return res;
		*val |= ctl.data_in;
	}
	return 0;
}

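/*
 * tb_eeprom_read_n() below sends the byte 3 followed by a big endian 16 bit
 * address and then clocks in the requested number of data bytes. This is the
 * standard READ (0x03) instruction of common 25-series SPI eeproms, so the
 * rom is presumably such a part.
 */
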
/**
 * tb_eeprom_read_n - read count bytes from offset into val
 */
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
		size_t count)
{
	int i, res;
	res = tb_eeprom_active(sw, true);
	if (res)
		return res;
	res = tb_eeprom_out(sw, 3);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset >> 8);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset);
	if (res)
		return res;
	for (i = 0; i < count; i++) {
		res = tb_eeprom_in(sw, val + i);
		if (res)
			return res;
	}
	return tb_eeprom_active(sw, false);
}

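/*
 * CRC-8 as used for the DROM uid checksum: polynomial 0x07
 * (x^8 + x^2 + x + 1), initial value 0xff, most significant bit first,
 * no final xor.
 */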
static u8 tb_crc8(u8 *data, int len)
{
	int i, j;
	u8 val = 0xff;
	for (i = 0; i < len; i++) {
		val ^= data[i];
		for (j = 0; j < 8; j++)
			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
	}
	return val;
}

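/*
 * The DROM data checksum is a plain CRC-32C (Castagnoli) in its usual
 * reflected form: initial value ~0 with the result inverted at the end.
 */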
static u32 tb_crc32(void *data, size_t len)
{
	return ~__crc32c_le(~0, data, len);
}

#define TB_DROM_DATA_START 13
struct tb_drom_header {
	/* BYTE 0 */
	u8 uid_crc8; /* checksum for uid */
	/* BYTES 1-8 */
	u64 uid;
	/* BYTES 9-12 */
	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
	/* BYTE 13 */
	u8 device_rom_revision; /* should be <= 1 */
	u16 data_len:10;
	u8 __unknown1:6;
	/* BYTES 16-21 */
	u16 vendor_id;
	u16 model_id;
	u8 model_rev;
	u8 eeprom_rev;
} __packed;

enum tb_drom_entry_type {
	/* force unsigned to prevent "one-bit signed bitfield" warning */
	TB_DROM_ENTRY_GENERIC = 0U,
	TB_DROM_ENTRY_PORT,
};

struct tb_drom_entry_header {
	u8 len;
	u8 index:6;
	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
	enum tb_drom_entry_type type:1;
} __packed;

struct tb_drom_entry_port {
	/* BYTES 0-1 */
	struct tb_drom_entry_header header;
	/* BYTE 2 */
	u8 dual_link_port_rid:4;
	u8 link_nr:1;
	u8 unknown1:2;
	bool has_dual_link_port:1;

	/* BYTE 3 */
	u8 dual_link_port_nr:6;
	u8 unknown2:2;

	/* BYTES 4 - 5 TODO decode */
	u8 micro2:4;
	u8 micro1:4;
	u8 micro3;

	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
	u8 peer_port_rid:4;
	u8 unknown3:3;
	bool has_peer_port:1;
	u8 peer_port_nr:6;
	u8 unknown4:2;
} __packed;

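/*
 * The DROM as parsed here is a 22 byte struct tb_drom_header followed by a
 * list of variable length entries. header->data_len counts the bytes
 * starting at TB_DROM_DATA_START (byte 13), i.e. the tail of the header
 * plus all entries, and data_crc32 covers exactly that region. Each entry
 * begins with a struct tb_drom_entry_header; only port entries are
 * interpreted below, generic entries are skipped.
 */
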
/**
 * tb_eeprom_get_drom_offset - get drom offset within eeprom
 */
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
	struct tb_cap_plug_events cap;
	int res;
	if (!sw->cap_plug_events) {
		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
		return -ENOSYS;
	}
	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
			 sizeof(cap) / 4);
	if (res)
		return res;

	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
		tb_sw_warn(sw, "no NVM\n");
		return -ENOSYS;
	}

	if (cap.drom_offset > 0xffff) {
		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
			   cap.drom_offset);
		return -ENXIO;
	}
	*offset = cap.drom_offset;
	return 0;
}

/**
 * tb_drom_read_uid_only - read uid directly from drom
 *
 * Does not use the cached copy in sw->drom. Used during resume to check switch
 * identity.
 */
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
	u8 data[9];
	u16 drom_offset;
	u8 crc;
	int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	/* read uid */
	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
	if (res)
		return res;

	crc = tb_crc8(data + 1, 8);
	if (crc != data[0]) {
		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
			   data[0], crc);
		return -EIO;
	}

	*uid = *(u64 *)(data + 1);
	return 0;
}

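/*
 * Port entries describe the switch ports. A Thunderbolt link consists of
 * two lanes and each lane shows up as its own port; link_nr and
 * dual_link_port apparently tie the two ports of one physical link
 * together, which is what tb_drom_parse_port_entry() records.
 */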
static void tb_drom_parse_port_entry(struct tb_port *port,
		struct tb_drom_entry_port *entry)
{
	port->link_nr = entry->link_nr;
	if (entry->has_dual_link_port)
		port->dual_link_port =
				&port->sw->ports[entry->dual_link_port_nr];
}

static int tb_drom_parse_entry(struct tb_switch *sw,
		struct tb_drom_entry_header *header)
{
	struct tb_port *port;
	int res;
	enum tb_port_type type;

	if (header->type != TB_DROM_ENTRY_PORT)
		return 0;

	port = &sw->ports[header->index];
	port->disabled = header->port_disabled;
	if (port->disabled)
		return 0;

	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
	if (res)
		return res;
	type &= 0xffffff;

	if (type == TB_TYPE_PORT) {
		struct tb_drom_entry_port *entry = (void *) header;
		if (header->len != sizeof(*entry)) {
			tb_sw_warn(sw,
				"port entry has size %#x (expected %#zx)\n",
				header->len, sizeof(struct tb_drom_entry_port));
			return -EIO;
		}
		tb_drom_parse_port_entry(port, entry);
	}
	return 0;
}

/**
 * tb_drom_parse_entries - parse the linked list of drom entries
 *
 * Drom must have been copied to sw->drom.
 */
static int tb_drom_parse_entries(struct tb_switch *sw)
{
	struct tb_drom_header *header = (void *) sw->drom;
	u16 pos = sizeof(*header);
	u16 drom_size = header->data_len + TB_DROM_DATA_START;

	while (pos < drom_size) {
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
			return -EIO;
		}

		tb_drom_parse_entry(sw, entry);

		pos += entry->len;
	}
	return 0;
}

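/*
 * tb_drom_read() first fetches bytes 14 and 15 of the header (the 10 bit
 * data_len field) to learn the total DROM size, then reads the whole DROM
 * into sw->drom and verifies the uid crc8 and the data crc32 before handing
 * the buffer to tb_drom_parse_entries(). The root switch has only a dummy
 * DROM (header without entries) and is configured with hardcoded values
 * instead.
 */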
/**
 * tb_drom_read - copy drom to sw->drom and parse it
 */
int tb_drom_read(struct tb_switch *sw)
{
	u16 drom_offset;
	u16 size;
	u32 crc;
	struct tb_drom_header *header;
	int res;
	if (sw->drom)
		return 0;

	if (tb_route(sw) == 0) {
		/*
		 * The root switch contains only a dummy drom (header only,
		 * no entries). Hardcode the configuration here.
		 */
		tb_drom_read_uid_only(sw, &sw->uid);

		sw->ports[1].link_nr = 0;
		sw->ports[2].link_nr = 1;
		sw->ports[1].dual_link_port = &sw->ports[2];
		sw->ports[2].dual_link_port = &sw->ports[1];

		sw->ports[3].link_nr = 0;
		sw->ports[4].link_nr = 1;
		sw->ports[3].dual_link_port = &sw->ports[4];
		sw->ports[4].dual_link_port = &sw->ports[3];

		/* Port 5 is inaccessible on this gen 1 controller */
		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
			sw->ports[5].disabled = true;

		return 0;
	}

	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
	if (res)
		return res;
	size &= 0x3ff;
	size += TB_DROM_DATA_START;
	tb_sw_info(sw, "reading drom (length: %#x)\n", size);
	if (size < sizeof(*header)) {
		tb_sw_warn(sw, "drom too small, aborting\n");
		return -EIO;
	}

	sw->drom = kzalloc(size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;
	res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
	if (res)
		goto err;

	header = (void *) sw->drom;

	if (header->data_len + TB_DROM_DATA_START != size) {
		tb_sw_warn(sw, "drom size mismatch, aborting\n");
		goto err;
	}

	crc = tb_crc8((u8 *) &header->uid, 8);
	if (crc != header->uid_crc8) {
		tb_sw_warn(sw,
			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
			header->uid_crc8, crc);
		goto err;
	}
	sw->uid = header->uid;

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			"drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n",
			header->data_crc32, crc);
		goto err;
	}

	if (header->device_rom_revision > 1)
		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
			   header->device_rom_revision);

	return tb_drom_parse_entries(sw);
err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EIO;
}