// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - eeprom access
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"

/*
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
}

/*
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
}

enum tb_eeprom_transfer {
	TB_EEPROM_IN,
	TB_EEPROM_OUT,
};
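
/*
 * The EEPROM sits behind a bit-banged serial interface driven through the
 * eeprom control word in the plug events capability: fl_cs is the (active
 * low) chip select, fl_sk the serial clock, fl_di the data line driven
 * towards the EEPROM and fl_do the data line read back from it.
 */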

/*
 * tb_eeprom_active - enable rom access
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
 * fail to reprobe.
 */
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
	struct tb_eeprom_ctl ctl;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	if (enable) {
		ctl.bit_banging_enable = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.fl_cs = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	} else {
		ctl.fl_cs = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.bit_banging_enable = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	}
}

/*
 * tb_eeprom_transfer - transfer one bit
 *
 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->fl_do.
 * If TB_EEPROM_OUT is passed, then ctl->fl_di will be written.
 */
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
			      enum tb_eeprom_transfer direction)
{
	int res;
	if (direction == TB_EEPROM_OUT) {
		/* drive the data bit before raising the clock */
		res = tb_eeprom_ctl_write(sw, ctl);
		if (res)
			return res;
	}
	ctl->fl_sk = 1;
	res = tb_eeprom_ctl_write(sw, ctl);
	if (res)
		return res;
	if (direction == TB_EEPROM_IN) {
		/* sample the data bit while the clock is high */
		res = tb_eeprom_ctl_read(sw, ctl);
		if (res)
			return res;
	}
	ctl->fl_sk = 0;
	return tb_eeprom_ctl_write(sw, ctl);
}

/*
 * tb_eeprom_out - write one byte to the bus
 */
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	for (i = 0; i < 8; i++) {
		/* most significant bit goes out first */
		ctl.fl_di = val & 0x80;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
		if (res)
			return res;
		val <<= 1;
	}
	return 0;
}

/*
 * tb_eeprom_in - read one byte from the bus
 */
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	*val = 0;
	for (i = 0; i < 8; i++) {
		/* bits arrive most significant first */
		*val <<= 1;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
		if (res)
			return res;
		*val |= ctl.fl_do;
	}
	return 0;
}

/*
 * tb_eeprom_get_drom_offset - get drom offset within eeprom
 */
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
	struct tb_cap_plug_events cap;
	int res;

	if (!sw->cap_plug_events) {
		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
		return -ENODEV;
	}
	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
			 sizeof(cap) / 4);
	if (res)
		return res;

	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
		tb_sw_warn(sw, "no NVM\n");
		return -ENODEV;
	}

	if (cap.drom_offset > 0xffff) {
		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
			   cap.drom_offset);
		return -ENXIO;
	}
	*offset = cap.drom_offset;
	return 0;
}

/*
 * tb_eeprom_read_n - read count bytes from offset into val
 */
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
		size_t count)
{
	u16 drom_offset;
	int i, res;

	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	offset += drom_offset;

	res = tb_eeprom_active(sw, true);
	if (res)
		return res;
	/* read command (0x3) followed by the 16-bit address, high byte first */
	res = tb_eeprom_out(sw, 3);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset >> 8);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset);
	if (res)
		return res;
	for (i = 0; i < count; i++) {
		res = tb_eeprom_in(sw, val + i);
		if (res)
			return res;
	}
	return tb_eeprom_active(sw, false);
}

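/* CRC-8 over the router UID: polynomial 0x07 (x^8 + x^2 + x + 1), init 0xff */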
static u8 tb_crc8(u8 *data, int len)
{
	int i, j;
	u8 val = 0xff;
	for (i = 0; i < len; i++) {
		val ^= data[i];
		for (j = 0; j < 8; j++)
			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
	}
	return val;
}

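/* CRC-32C (Castagnoli) over the DROM data section, inverted on input and output */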
static u32 tb_crc32(void *data, size_t len)
{
	return ~__crc32c_le(~0, data, len);
}

#define TB_DROM_DATA_START		13
#define TB_DROM_HEADER_SIZE		22
#define USB4_DROM_HEADER_SIZE		16

struct tb_drom_header {
	/* BYTE 0 */
	u8 uid_crc8;		/* checksum for uid */
	/* BYTES 1-8 */
	u64 uid;
	/* BYTES 9-12 */
	u32 data_crc32;		/* checksum for data_len bytes starting at byte 13 */
	/* BYTE 13 */
	u8 device_rom_revision;	/* should be <= 1 */
	u16 data_len:12;
	u8 reserved:4;
	/* BYTES 16-21 - Only for TBT DROM, nonexistent in USB4 DROM */
	u16 vendor_id;
	u16 model_id;
	u8 model_rev;
	u8 eeprom_rev;
} __packed;

enum tb_drom_entry_type {
	/* force unsigned to prevent "one-bit signed bitfield" warning */
	TB_DROM_ENTRY_GENERIC = 0U,
	TB_DROM_ENTRY_PORT,
};

struct tb_drom_entry_header {
	u8 len;
	u8 index:6;
	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
	enum tb_drom_entry_type type:1;
} __packed;

struct tb_drom_entry_generic {
	struct tb_drom_entry_header header;
	u8 data[];
} __packed;

struct tb_drom_entry_port {
	/* BYTES 0-1 */
	struct tb_drom_entry_header header;
	/* BYTE 2 */
	u8 dual_link_port_rid:4;
	u8 link_nr:1;
	u8 unknown1:2;
	bool has_dual_link_port:1;

	/* BYTE 3 */
	u8 dual_link_port_nr:6;
	u8 unknown2:2;

	/* BYTES 4 - 5 TODO decode */
	u8 micro2:4;
	u8 micro1:4;
	u8 micro3;

	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
	u8 peer_port_rid:4;
	u8 unknown3:3;
	bool has_peer_port:1;
	u8 peer_port_nr:6;
	u8 unknown4:2;
} __packed;

/* USB4 product descriptor */
struct tb_drom_entry_desc {
	struct tb_drom_entry_header header;
	u16 bcdUSBSpec;
	u16 idVendor;
	u16 idProduct;
	u16 bcdProductFWRevision;
	u32 TID;
	u8 productHWRevision;
};

/**
 * tb_drom_read_uid_only() - Read UID directly from DROM
 * @sw: Router whose UID to read
 * @uid: UID is placed here
 *
 * Does not use the cached copy in sw->drom. Used during resume to check switch
 * identity.
 */
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
	u8 data[9];
	u8 crc;
	int res;

	/* read uid */
	res = tb_eeprom_read_n(sw, 0, data, 9);
	if (res)
		return res;

	crc = tb_crc8(data + 1, 8);
	if (crc != data[0]) {
		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
			   data[0], crc);
		return -EIO;
	}

	*uid = *(u64 *)(data+1);
	return 0;
}

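/*
 * Generic entries carry the vendor (index 1) and device (index 2) name
 * strings and, on USB4 routers, the product descriptor (index 9).
 */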
static int tb_drom_parse_entry_generic(struct tb_switch *sw,
				       struct tb_drom_entry_header *header)
{
	const struct tb_drom_entry_generic *entry =
		(const struct tb_drom_entry_generic *)header;

	switch (header->index) {
	case 1:
		/* Length includes 2 bytes header so remove it before copy */
		sw->vendor_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->vendor_name)
			return -ENOMEM;
		break;

	case 2:
		sw->device_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->device_name)
			return -ENOMEM;
		break;
	case 9: {
		const struct tb_drom_entry_desc *desc =
			(const struct tb_drom_entry_desc *)entry;

		if (!sw->vendor && !sw->device) {
			sw->vendor = desc->idVendor;
			sw->device = desc->idProduct;
		}
		break;
	}
	}

	return 0;
}

static int tb_drom_parse_entry_port(struct tb_switch *sw,
				    struct tb_drom_entry_header *header)
{
	struct tb_port *port;
	int res;
	enum tb_port_type type;

	/*
	 * Some DROMs list more ports than the controller actually has
	 * so we skip those but allow the parser to continue.
	 */
	if (header->index > sw->config.max_port_number) {
		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
		return 0;
	}

	port = &sw->ports[header->index];
	port->disabled = header->port_disabled;
	if (port->disabled)
		return 0;

	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
	if (res)
		return res;
	type &= 0xffffff;

	if (type == TB_TYPE_PORT) {
		struct tb_drom_entry_port *entry = (void *) header;
		if (header->len != sizeof(*entry)) {
			tb_sw_warn(sw,
				"port entry has size %#x (expected %#zx)\n",
				header->len, sizeof(struct tb_drom_entry_port));
			return -EIO;
		}
		port->link_nr = entry->link_nr;
		if (entry->has_dual_link_port)
			port->dual_link_port =
				&port->sw->ports[entry->dual_link_port_nr];
	}
	return 0;
}

/*
 * tb_drom_parse_entries - parse the linked list of drom entries
 *
 * Drom must have been copied to sw->drom.
 */
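/*
 * Each entry starts with a tb_drom_entry_header whose len field covers the
 * whole entry including the header; the next entry follows immediately.
 */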
static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
{
	struct tb_drom_header *header = (void *) sw->drom;
	u16 pos = header_size;
	u16 drom_size = header->data_len + TB_DROM_DATA_START;
	int res;

	while (pos < drom_size) {
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "DROM buffer overrun\n");
			return -EILSEQ;
		}

		switch (entry->type) {
		case TB_DROM_ENTRY_GENERIC:
			res = tb_drom_parse_entry_generic(sw, entry);
			break;
		case TB_DROM_ENTRY_PORT:
			res = tb_drom_parse_entry_port(sw, entry);
			break;
		}
		if (res)
			return res;

		pos += entry->len;
	}
	return 0;
}

/*
 * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
 */
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
{
	struct device *dev = &sw->tb->nhi->pdev->dev;
	int len, res;

	len = device_property_count_u8(dev, "ThunderboltDROM");
	if (len < 0 || len < sizeof(struct tb_drom_header))
		return -EINVAL;

	sw->drom = kmalloc(len, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
					    len);
	if (res)
		goto err;

	*size = ((struct tb_drom_header *)sw->drom)->data_len +
		TB_DROM_DATA_START;
	if (*size > len)
		goto err;

	return 0;

err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EINVAL;
}

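/*
 * tb_drom_copy_nvm - copy the host router DROM from NVM via the DMA port
 *
 * Used on non-Apple hardware where the DROM is part of the NVM image.
 */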
static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
	u32 drom_offset;
	int ret;

	if (!sw->dma_port)
		return -ENODEV;

	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
			 sw->cap_plug_events + 12, 1);
	if (ret)
		return ret;

	if (!drom_offset)
		return -ENODEV;

	/* the 16-bit length field sits at bytes 14-15 of the DROM header */
	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
				  sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
	if (ret)
		goto err_free;

	/*
	 * Read UID from the minimal DROM because the one in NVM is just
	 * a placeholder.
	 */
	tb_drom_read_uid_only(sw, &sw->uid);
	return 0;

err_free:
	kfree(sw->drom);
	sw->drom = NULL;
	return ret;
}

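/*
 * usb4_copy_host_drom - read the host router DROM using USB4 router operations
 */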
static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
{
	int ret;

	ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
	if (ret) {
		kfree(sw->drom);
		sw->drom = NULL;
	}

	return ret;
}

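/*
 * tb_drom_read_n - read count bytes from the DROM at offset, using USB4
 * router operations when available and the bit-banged EEPROM otherwise
 */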
static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
			  size_t count)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_drom_read(sw, offset, val, count);
	return tb_eeprom_read_n(sw, offset, val, count);
}

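/*
 * tb_drom_parse - parse a Thunderbolt DROM: verify the UID CRC8, warn (but
 * continue) on data CRC32 mismatch and walk the entries after the header
 */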
static int tb_drom_parse(struct tb_switch *sw)
{
	const struct tb_drom_header *header =
		(const struct tb_drom_header *)sw->drom;
	u32 crc;

	crc = tb_crc8((u8 *) &header->uid, 8);
	if (crc != header->uid_crc8) {
		tb_sw_warn(sw,
			"DROM UID CRC8 mismatch (expected: %#x, got: %#x)\n",
			header->uid_crc8, crc);
		return -EILSEQ;
	}
	if (!sw->uid)
		sw->uid = header->uid;
	sw->vendor = header->vendor_id;
	sw->device = header->model_id;

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
			header->data_crc32, crc);
	}

	return tb_drom_parse_entries(sw, TB_DROM_HEADER_SIZE);
}

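/*
 * usb4_drom_parse - parse a USB4 DROM, which uses a shorter header without
 * the vendor/model fields; a data CRC32 mismatch is fatal here
 */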
static int usb4_drom_parse(struct tb_switch *sw)
{
	const struct tb_drom_header *header =
		(const struct tb_drom_header *)sw->drom;
	u32 crc;

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			   "DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n",
			   header->data_crc32, crc);
		return -EINVAL;
	}

	return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
}

/**
 * tb_drom_read() - Copy DROM to sw->drom and parse it
 * @sw: Router whose DROM to read and parse
 *
 * This function reads router DROM and if successful parses the entries and
 * populates the fields in @sw accordingly. Can be called for any router
 * generation.
 *
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_drom_read(struct tb_switch *sw)
{
	u16 size;
	struct tb_drom_header *header;
	int res, retries = 1;

	if (sw->drom)
		return 0;

	if (tb_route(sw) == 0) {
		/*
		 * Apple's NHI EFI driver supplies a DROM for the root switch
		 * in a device property. Use it if available.
		 */
		if (tb_drom_copy_efi(sw, &size) == 0)
			goto parse;

		/* Non-Apple hardware has the DROM as part of NVM */
		if (tb_drom_copy_nvm(sw, &size) == 0)
			goto parse;

		/*
		 * USB4 hosts may support reading DROM through router
		 * operations.
		 */
		if (tb_switch_is_usb4(sw)) {
			usb4_switch_read_uid(sw, &sw->uid);
			if (!usb4_copy_host_drom(sw, &size))
				goto parse;
		} else {
			/*
			 * The root switch contains only a dummy drom
			 * (header only, no entries). Hardcode the
			 * configuration here.
			 */
			tb_drom_read_uid_only(sw, &sw->uid);
		}

		return 0;
	}

	/* the 16-bit length field sits at bytes 14-15 of the DROM header */
	res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
	if (res)
		return res;
	size &= 0x3ff;
	size += TB_DROM_DATA_START;
	tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
	if (size < sizeof(*header)) {
		tb_sw_warn(sw, "drom too small, aborting\n");
		return -EIO;
	}

	sw->drom = kzalloc(size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;
read:
	res = tb_drom_read_n(sw, 0, sw->drom, size);
	if (res)
		goto err;

parse:
	header = (void *) sw->drom;

	if (header->data_len + TB_DROM_DATA_START != size) {
		tb_sw_warn(sw, "drom size mismatch\n");
		if (retries--) {
			msleep(100);
			goto read;
		}
		goto err;
	}

	tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision);

	switch (header->device_rom_revision) {
	case 3:
		res = usb4_drom_parse(sw);
		break;
	default:
		tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n",
			   header->device_rom_revision);
		fallthrough;
	case 1:
		res = tb_drom_parse(sw);
		break;
	}

	/* If the DROM parsing fails, wait a moment and retry once */
	if (res == -EILSEQ && retries--) {
		tb_sw_warn(sw, "parsing DROM failed\n");
		msleep(100);
		goto read;
	}

	if (!res)
		return 0;

err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EIO;
}