1 /*
2 * Device probing and sysfs code.
3 *
4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21 #include <linux/bug.h>
22 #include <linux/ctype.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/errno.h>
26 #include <linux/firewire.h>
27 #include <linux/firewire-constants.h>
28 #include <linux/idr.h>
29 #include <linux/jiffies.h>
30 #include <linux/kobject.h>
31 #include <linux/list.h>
32 #include <linux/mod_devicetable.h>
33 #include <linux/module.h>
34 #include <linux/mutex.h>
35 #include <linux/rwsem.h>
36 #include <linux/slab.h>
37 #include <linux/spinlock.h>
38 #include <linux/string.h>
39 #include <linux/workqueue.h>
40
41 #include <asm/atomic.h>
42 #include <asm/byteorder.h>
43 #include <asm/system.h>
44
45 #include "core.h"
46
47 void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
48 {
49 ci->p = p + 1;
50 ci->end = ci->p + (p[0] >> 16);
51 }
52 EXPORT_SYMBOL(fw_csr_iterator_init);
53
54 int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
55 {
56 *key = *ci->p >> 24;
57 *value = *ci->p & 0xffffff;
58
59 return ci->p++ < ci->end;
60 }
61 EXPORT_SYMBOL(fw_csr_iterator_next);
62
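/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * unit driver could walk its unit directory with the iterator above:
 *
 *	static void dump_unit_keys(struct fw_unit *unit)
 *	{
 *		struct fw_csr_iterator ci;
 *		int key, value;
 *
 *		fw_csr_iterator_init(&ci, unit->directory);
 *		while (fw_csr_iterator_next(&ci, &key, &value))
 *			pr_debug("key 0x%02x, value 0x%06x\n", key, value);
 *	}
 *
 * Each returned key is the upper 8 bits of a directory entry and each
 * value the lower 24 bits, as extracted in fw_csr_iterator_next().
 */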
63 static const u32 *search_leaf(const u32 *directory, int search_key)
64 {
65 struct fw_csr_iterator ci;
66 int last_key = 0, key, value;
67
68 fw_csr_iterator_init(&ci, directory);
69 while (fw_csr_iterator_next(&ci, &key, &value)) {
70 if (last_key == search_key &&
71 key == (CSR_DESCRIPTOR | CSR_LEAF))
72 return ci.p - 1 + value;
73
74 last_key = key;
75 }
76
77 return NULL;
78 }
79
80 static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
81 {
82 unsigned int quadlets, i;
83 char c;
84
85 if (!size || !buf)
86 return -EINVAL;
87
88 quadlets = min(block[0] >> 16, 256U);
89 if (quadlets < 2)
90 return -ENODATA;
91
92 if (block[1] != 0 || block[2] != 0)
93 /* unknown language/character set */
94 return -ENODATA;
95
96 block += 3;
97 quadlets -= 2;
98 for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
99 c = block[i / 4] >> (24 - 8 * (i % 4));
100 if (c == '\0')
101 break;
102 buf[i] = c;
103 }
104 buf[i] = '\0';
105
106 return i;
107 }
108
109 /**
110 * fw_csr_string() - reads a string from the configuration ROM
111 * @directory: e.g. root directory or unit directory
112 * @key: the key of the preceding directory entry
113 * @buf: where to put the string
114 * @size: size of @buf, in bytes
115 *
116 * The string is taken from a minimal ASCII text descriptor leaf after
117 * the immediate entry with @key. The string is zero-terminated.
118 * Returns strlen(buf) or a negative error code.
119 */
120 int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
121 {
122 const u32 *leaf = search_leaf(directory, key);
123 if (!leaf)
124 return -ENOENT;
125
126 return textual_leaf_to_string(leaf, buf, size);
127 }
128 EXPORT_SYMBOL(fw_csr_string);
129
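/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * reading the model name of a unit with fw_csr_string():
 *
 *	char name[32];
 *	int ret = fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name));
 *
 * On success, ret is strlen(name) and name holds the zero-terminated
 * string; -ENOENT means no descriptor leaf follows the CSR_MODEL entry.
 */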
130 static void get_ids(const u32 *directory, int *id)
131 {
132 struct fw_csr_iterator ci;
133 int key, value;
134
135 fw_csr_iterator_init(&ci, directory);
136 while (fw_csr_iterator_next(&ci, &key, &value)) {
137 switch (key) {
138 case CSR_VENDOR: id[0] = value; break;
139 case CSR_MODEL: id[1] = value; break;
140 case CSR_SPECIFIER_ID: id[2] = value; break;
141 case CSR_VERSION: id[3] = value; break;
142 }
143 }
144 }
145
146 static void get_modalias_ids(struct fw_unit *unit, int *id)
147 {
148 get_ids(&fw_parent_device(unit)->config_rom[5], id);
149 get_ids(unit->directory, id);
150 }
151
152 static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
153 {
154 int match = 0;
155
156 if (id[0] == id_table->vendor_id)
157 match |= IEEE1394_MATCH_VENDOR_ID;
158 if (id[1] == id_table->model_id)
159 match |= IEEE1394_MATCH_MODEL_ID;
160 if (id[2] == id_table->specifier_id)
161 match |= IEEE1394_MATCH_SPECIFIER_ID;
162 if (id[3] == id_table->version)
163 match |= IEEE1394_MATCH_VERSION;
164
165 return (match & id_table->match_flags) == id_table->match_flags;
166 }
167
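/*
 * Illustrative sketch: an entry as a unit driver might put into its
 * ieee1394_device_id table.  The specifier/version pair below is the
 * well-known AV/C one, used here only as an example; match_ids() merely
 * requires that every ID selected in match_flags compares equal.
 *
 *	static const struct ieee1394_device_id example_id_table[] = {
 *		{
 *			.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
 *					IEEE1394_MATCH_VERSION,
 *			.specifier_id = 0x00a02d,
 *			.version      = 0x010001,
 *		},
 *		{ }
 *	};
 */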
168 static bool is_fw_unit(struct device *dev);
169
170 static int fw_unit_match(struct device *dev, struct device_driver *drv)
171 {
172 const struct ieee1394_device_id *id_table =
173 container_of(drv, struct fw_driver, driver)->id_table;
174 int id[] = {0, 0, 0, 0};
175
176 /* We only allow binding to fw_units. */
177 if (!is_fw_unit(dev))
178 return 0;
179
180 get_modalias_ids(fw_unit(dev), id);
181
182 for (; id_table->match_flags != 0; id_table++)
183 if (match_ids(id_table, id))
184 return 1;
185
186 return 0;
187 }
188
189 static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
190 {
191 int id[] = {0, 0, 0, 0};
192
193 get_modalias_ids(unit, id);
194
195 return snprintf(buffer, buffer_size,
196 "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
197 id[0], id[1], id[2], id[3]);
198 }
199
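/*
 * Illustrative sketch with hypothetical ids: vendor 0x000001, model
 * 0x000002, specifier id 0x000003 and version 0x000004 yield
 *
 *	ieee1394:ven00000001mo00000002sp00000003ver00000004
 *
 * which is the string udev and modprobe match against the module aliases
 * generated from unit drivers' MODULE_DEVICE_TABLE() entries.
 */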
200 static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
201 {
202 struct fw_unit *unit = fw_unit(dev);
203 char modalias[64];
204
205 get_modalias(unit, modalias, sizeof(modalias));
206
207 if (add_uevent_var(env, "MODALIAS=%s", modalias))
208 return -ENOMEM;
209
210 return 0;
211 }
212
213 struct bus_type fw_bus_type = {
214 .name = "firewire",
215 .match = fw_unit_match,
216 };
217 EXPORT_SYMBOL(fw_bus_type);
218
219 int fw_device_enable_phys_dma(struct fw_device *device)
220 {
221 int generation = device->generation;
222
223 /* device->node_id, accessed below, must not be older than generation */
224 smp_rmb();
225
226 return device->card->driver->enable_phys_dma(device->card,
227 device->node_id,
228 generation);
229 }
230 EXPORT_SYMBOL(fw_device_enable_phys_dma);
231
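/*
 * The smp_rmb() above pairs with the smp_wmb() in fw_node_event():
 * writers store device->node_id before device->generation, readers load
 * device->generation before device->node_id.  That way the node_id used
 * for a transaction is never older than the generation sent with it; a
 * stale generation merely makes the transaction fail harmlessly.
 */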
232 struct config_rom_attribute {
233 struct device_attribute attr;
234 u32 key;
235 };
236
237 static ssize_t show_immediate(struct device *dev,
238 struct device_attribute *dattr, char *buf)
239 {
240 struct config_rom_attribute *attr =
241 container_of(dattr, struct config_rom_attribute, attr);
242 struct fw_csr_iterator ci;
243 const u32 *dir;
244 int key, value, ret = -ENOENT;
245
246 down_read(&fw_device_rwsem);
247
248 if (is_fw_unit(dev))
249 dir = fw_unit(dev)->directory;
250 else
251 dir = fw_device(dev)->config_rom + 5;
252
253 fw_csr_iterator_init(&ci, dir);
254 while (fw_csr_iterator_next(&ci, &key, &value))
255 if (attr->key == key) {
256 ret = snprintf(buf, buf ? PAGE_SIZE : 0,
257 "0x%06x\n", value);
258 break;
259 }
260
261 up_read(&fw_device_rwsem);
262
263 return ret;
264 }
265
266 #define IMMEDIATE_ATTR(name, key) \
267 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
268
269 static ssize_t show_text_leaf(struct device *dev,
270 struct device_attribute *dattr, char *buf)
271 {
272 struct config_rom_attribute *attr =
273 container_of(dattr, struct config_rom_attribute, attr);
274 const u32 *dir;
275 size_t bufsize;
276 char dummy_buf[2];
277 int ret;
278
279 down_read(&fw_device_rwsem);
280
281 if (is_fw_unit(dev))
282 dir = fw_unit(dev)->directory;
283 else
284 dir = fw_device(dev)->config_rom + 5;
285
286 if (buf) {
287 bufsize = PAGE_SIZE - 1;
288 } else {
289 buf = dummy_buf;
290 bufsize = 1;
291 }
292
293 ret = fw_csr_string(dir, attr->key, buf, bufsize);
294
295 if (ret >= 0) {
296 /* Strip trailing whitespace and add newline. */
297 while (ret > 0 && isspace(buf[ret - 1]))
298 ret--;
299 strcpy(buf + ret, "\n");
300 ret++;
301 }
302
303 up_read(&fw_device_rwsem);
304
305 return ret;
306 }
307
308 #define TEXT_LEAF_ATTR(name, key) \
309 { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
310
311 static struct config_rom_attribute config_rom_attributes[] = {
312 IMMEDIATE_ATTR(vendor, CSR_VENDOR),
313 IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
314 IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
315 IMMEDIATE_ATTR(version, CSR_VERSION),
316 IMMEDIATE_ATTR(model, CSR_MODEL),
317 TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
318 TEXT_LEAF_ATTR(model_name, CSR_MODEL),
319 TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
320 };
321
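/*
 * These attributes, together with the fw_device and fw_unit attributes
 * defined further down, expose config ROM data in sysfs.  As an
 * illustration (names depend on the actual minor numbers and ROM
 * contents), a device and its first unit typically end up with files
 * such as:
 *
 *	/sys/bus/firewire/devices/fw1/vendor
 *	/sys/bus/firewire/devices/fw1/guid
 *	/sys/bus/firewire/devices/fw1/units
 *	/sys/bus/firewire/devices/fw1.0/specifier_id
 *	/sys/bus/firewire/devices/fw1.0/version
 *	/sys/bus/firewire/devices/fw1.0/modalias
 *
 * Attributes whose key or text leaf is absent from the ROM are simply
 * not created; see init_fw_attribute_group() below.
 */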
322 static void init_fw_attribute_group(struct device *dev,
323 struct device_attribute *attrs,
324 struct fw_attribute_group *group)
325 {
326 struct device_attribute *attr;
327 int i, j;
328
329 for (j = 0; attrs[j].attr.name != NULL; j++)
330 group->attrs[j] = &attrs[j].attr;
331
332 for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
333 attr = &config_rom_attributes[i].attr;
334 if (attr->show(dev, attr, NULL) < 0)
335 continue;
336 group->attrs[j++] = &attr->attr;
337 }
338
339 group->attrs[j] = NULL;
340 group->groups[0] = &group->group;
341 group->groups[1] = NULL;
342 group->group.attrs = group->attrs;
343 dev->groups = (const struct attribute_group **) group->groups;
344 }
345
346 static ssize_t modalias_show(struct device *dev,
347 struct device_attribute *attr, char *buf)
348 {
349 struct fw_unit *unit = fw_unit(dev);
350 int length;
351
352 length = get_modalias(unit, buf, PAGE_SIZE);
353 strcpy(buf + length, "\n");
354
355 return length + 1;
356 }
357
358 static ssize_t rom_index_show(struct device *dev,
359 struct device_attribute *attr, char *buf)
360 {
361 struct fw_device *device = fw_device(dev->parent);
362 struct fw_unit *unit = fw_unit(dev);
363
364 return snprintf(buf, PAGE_SIZE, "%d\n",
365 (int)(unit->directory - device->config_rom));
366 }
367
368 static struct device_attribute fw_unit_attributes[] = {
369 __ATTR_RO(modalias),
370 __ATTR_RO(rom_index),
371 __ATTR_NULL,
372 };
373
374 static ssize_t config_rom_show(struct device *dev,
375 struct device_attribute *attr, char *buf)
376 {
377 struct fw_device *device = fw_device(dev);
378 size_t length;
379
380 down_read(&fw_device_rwsem);
381 length = device->config_rom_length * 4;
382 memcpy(buf, device->config_rom, length);
383 up_read(&fw_device_rwsem);
384
385 return length;
386 }
387
388 static ssize_t guid_show(struct device *dev,
389 struct device_attribute *attr, char *buf)
390 {
391 struct fw_device *device = fw_device(dev);
392 int ret;
393
394 down_read(&fw_device_rwsem);
395 ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
396 device->config_rom[3], device->config_rom[4]);
397 up_read(&fw_device_rwsem);
398
399 return ret;
400 }
401
402 static int units_sprintf(char *buf, const u32 *directory)
403 {
404 struct fw_csr_iterator ci;
405 int key, value;
406 int specifier_id = 0;
407 int version = 0;
408
409 fw_csr_iterator_init(&ci, directory);
410 while (fw_csr_iterator_next(&ci, &key, &value)) {
411 switch (key) {
412 case CSR_SPECIFIER_ID:
413 specifier_id = value;
414 break;
415 case CSR_VERSION:
416 version = value;
417 break;
418 }
419 }
420
421 return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
422 }
423
424 static ssize_t units_show(struct device *dev,
425 struct device_attribute *attr, char *buf)
426 {
427 struct fw_device *device = fw_device(dev);
428 struct fw_csr_iterator ci;
429 int key, value, i = 0;
430
431 down_read(&fw_device_rwsem);
432 fw_csr_iterator_init(&ci, &device->config_rom[5]);
433 while (fw_csr_iterator_next(&ci, &key, &value)) {
434 if (key != (CSR_UNIT | CSR_DIRECTORY))
435 continue;
436 i += units_sprintf(&buf[i], ci.p + value - 1);
437 if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
438 break;
439 }
440 up_read(&fw_device_rwsem);
441
442 if (i)
443 buf[i - 1] = '\n';
444
445 return i;
446 }
447
448 static struct device_attribute fw_device_attributes[] = {
449 __ATTR_RO(config_rom),
450 __ATTR_RO(guid),
451 __ATTR_RO(units),
452 __ATTR_NULL,
453 };
454
455 static int read_rom(struct fw_device *device,
456 int generation, int index, u32 *data)
457 {
458 int rcode;
459
460 /* device->node_id, accessed below, must not be older than generation */
461 smp_rmb();
462
463 rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
464 device->node_id, generation, device->max_speed,
465 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
466 data, 4);
467 be32_to_cpus(data);
468
469 return rcode;
470 }
471
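/*
 * Worked example of the address computed above: CSR_REGISTER_BASE is
 * 0xfffff0000000 and CSR_CONFIG_ROM is 0x400, so quadlet index 3 (the
 * high half of the GUID) is read from 0xfffff000040c with a quadlet
 * read request at the device's current maximum speed.
 */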
472 #define MAX_CONFIG_ROM_SIZE 256
473
474 /*
475 * Read the bus info block, perform a speed probe, and read all of the rest of
476 * the config ROM. We do all this with a cached bus generation. If the bus
477 * generation changes under us, read_config_rom will fail and get retried.
478 * It's better to start all over in this case because the node from which we
479 * are reading the ROM may have changed the ROM during the reset.
480 */
481 static int read_config_rom(struct fw_device *device, int generation)
482 {
483 const u32 *old_rom, *new_rom;
484 u32 *rom, *stack;
485 u32 sp, key;
486 int i, end, length, ret = -1;
487
488 rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
489 sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
490 if (rom == NULL)
491 return -ENOMEM;
492
493 stack = &rom[MAX_CONFIG_ROM_SIZE];
494 memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
495
496 device->max_speed = SCODE_100;
497
498 /* First read the bus info block. */
499 for (i = 0; i < 5; i++) {
500 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
501 goto out;
502 /*
503 * As per IEEE1212 7.2, during power-up, devices can
504 * reply with a 0 for the first quadlet of the config
505 * rom to indicate that they are booting (for example,
506 * if the firmware is on the disk of an external
507 * hard disk). In that case we just fail, and the
508 * retry mechanism will try again later.
509 */
510 if (i == 0 && rom[i] == 0)
511 goto out;
512 }
513
514 device->max_speed = device->node->max_speed;
515
516 /*
517 * Determine the speed of
518 * - devices with link speed less than PHY speed,
519 * - devices with 1394b PHY (unless only connected to 1394a PHYs),
520 * - all devices if there are 1394b repeaters.
521 * Note, we cannot use the bus info block's link_spd as a starting point
522 * because some buggy firmware sets it lower than necessary and because
523 * 1394-1995 nodes do not have the field.
524 */
525 if ((rom[2] & 0x7) < device->max_speed ||
526 device->max_speed == SCODE_BETA ||
527 device->card->beta_repeaters_present) {
528 u32 dummy;
529
530 /* for S1600 and S3200 */
531 if (device->max_speed == SCODE_BETA)
532 device->max_speed = device->card->link_speed;
533
534 while (device->max_speed > SCODE_100) {
535 if (read_rom(device, generation, 0, &dummy) ==
536 RCODE_COMPLETE)
537 break;
538 device->max_speed--;
539 }
540 }
541
542 /*
543 * Now parse the config rom. The config rom is a recursive
544 * directory structure so we parse it using a stack of
545 * references to the blocks that make up the structure. We
546 * push a reference to the root directory on the stack to
547 * start things off.
548 */
549 length = i;
550 sp = 0;
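/*
 * 0xc0000005 is a synthetic directory entry: the upper two bits mark
 * it as a directory-type reference and the lower 24 bits point at
 * quadlet 5, where the root directory begins right after the five
 * bus info block quadlets.
 */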
551 stack[sp++] = 0xc0000005;
552 while (sp > 0) {
553 /*
554 * Pop the next block reference off the stack. The
555 * lower 24 bits are the offset into the config rom,
556 * the upper 8 bits are the type of the reference to
557 * the block.
558 */
559 key = stack[--sp];
560 i = key & 0xffffff;
561 if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
562 goto out;
563
564 /* Read header quadlet for the block to get the length. */
565 if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
566 goto out;
567 end = i + (rom[i] >> 16) + 1;
568 if (end > MAX_CONFIG_ROM_SIZE) {
569 /*
570 * This block extends outside the config ROM which is
571 * a firmware bug. Ignore this whole block, i.e.
572 * simply set a fake block length of 0.
573 */
574 fw_error("skipped invalid ROM block %x at %llx\n",
575 rom[i],
576 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
577 rom[i] = 0;
578 end = i;
579 }
580 i++;
581
582 /*
583 * Now read in the block. If this is a directory
584 * block, check the entries as we read them to see if
585 * it references another block, and push it in that case.
586 */
587 for (; i < end; i++) {
588 if (read_rom(device, generation, i, &rom[i]) !=
589 RCODE_COMPLETE)
590 goto out;
591
592 if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
593 continue;
594 /*
595 * Offset points outside the ROM. May be a firmware
596 * bug or an Extended ROM entry (IEEE 1212-2001 clause
597 * 7.7.18). Simply overwrite this pointer here with a
598 * fake immediate entry so that later iterators over
599 * the ROM don't have to check offsets all the time.
600 */
601 if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
602 fw_error("skipped unsupported ROM entry %x at %llx\n",
603 rom[i],
604 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
605 rom[i] = 0;
606 continue;
607 }
608 stack[sp++] = i + rom[i];
609 }
610 if (length < i)
611 length = i;
612 }
613
614 old_rom = device->config_rom;
615 new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
616 if (new_rom == NULL)
617 goto out;
618
619 down_write(&fw_device_rwsem);
620 device->config_rom = new_rom;
621 device->config_rom_length = length;
622 up_write(&fw_device_rwsem);
623
624 kfree(old_rom);
625 ret = 0;
626 device->max_rec = rom[2] >> 12 & 0xf;
627 device->cmc = rom[2] >> 30 & 1;
628 device->irmc = rom[2] >> 31 & 1;
629 out:
630 kfree(rom);
631
632 return ret;
633 }
634
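/*
 * For reference, the five bus info block quadlets which read_config_rom()
 * fetches first (standard IEEE 1394 layout; this is what guid_show() and
 * the max_rec/cmc/irmc assignments above rely on):
 *
 *	rom[0]  info_length, crc_length and CRC of the bus info block
 *	rom[1]  the literal 0x31333934 ("1394")
 *	rom[2]  capability bits, among them irmc (bit 31), cmc (bit 30),
 *	        max_rec (bits 15-12) and link_spd (bits 2-0)
 *	rom[3]  node_vendor_id plus the high 8 bits of the chip id
 *	rom[4]  the low 32 bits of the chip id; rom[3] and rom[4] together
 *	        form the 64-bit GUID
 */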
635 static void fw_unit_release(struct device *dev)
636 {
637 struct fw_unit *unit = fw_unit(dev);
638
639 kfree(unit);
640 }
641
642 static struct device_type fw_unit_type = {
643 .uevent = fw_unit_uevent,
644 .release = fw_unit_release,
645 };
646
647 static bool is_fw_unit(struct device *dev)
648 {
649 return dev->type == &fw_unit_type;
650 }
651
652 static void create_units(struct fw_device *device)
653 {
654 struct fw_csr_iterator ci;
655 struct fw_unit *unit;
656 int key, value, i;
657
658 i = 0;
659 fw_csr_iterator_init(&ci, &device->config_rom[5]);
660 while (fw_csr_iterator_next(&ci, &key, &value)) {
661 if (key != (CSR_UNIT | CSR_DIRECTORY))
662 continue;
663
664 /*
665 * Get the address of the unit directory and try to
666 * match the drivers' id_tables against it.
667 */
668 unit = kzalloc(sizeof(*unit), GFP_KERNEL);
669 if (unit == NULL) {
670 fw_error("failed to allocate memory for unit\n");
671 continue;
672 }
673
674 unit->directory = ci.p + value - 1;
675 unit->device.bus = &fw_bus_type;
676 unit->device.type = &fw_unit_type;
677 unit->device.parent = &device->device;
678 dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
679
680 BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
681 ARRAY_SIZE(fw_unit_attributes) +
682 ARRAY_SIZE(config_rom_attributes));
683 init_fw_attribute_group(&unit->device,
684 fw_unit_attributes,
685 &unit->attribute_group);
686
687 if (device_register(&unit->device) < 0)
688 goto skip_unit;
689
690 continue;
691
692 skip_unit:
693 kfree(unit);
694 }
695 }
696
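/*
 * Units are children of their fw_device in the driver model.  As an
 * illustration, a device registered as "fw1" (see fw_device_init() below)
 * with two unit directories in its root directory shows up as "fw1.0"
 * and "fw1.1", and only these unit devices are offered to fw_unit_match()
 * for driver binding.
 */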
697 static int shutdown_unit(struct device *device, void *data)
698 {
699 device_unregister(device);
700
701 return 0;
702 }
703
704 /*
705 * fw_device_rwsem acts as dual purpose mutex:
706 * - serializes accesses to fw_device_idr,
707 * - serializes accesses to fw_device.config_rom/.config_rom_length and
708 * fw_unit.directory, unless those accesses happen at safe occasions
709 */
710 DECLARE_RWSEM(fw_device_rwsem);
711
712 DEFINE_IDR(fw_device_idr);
713 int fw_cdev_major;
714
715 struct fw_device *fw_device_get_by_devt(dev_t devt)
716 {
717 struct fw_device *device;
718
719 down_read(&fw_device_rwsem);
720 device = idr_find(&fw_device_idr, MINOR(devt));
721 if (device)
722 fw_device_get(device);
723 up_read(&fw_device_rwsem);
724
725 return device;
726 }
727
728 /*
729 * These defines control the retry behavior for reading the config
730 * rom. It shouldn't be necessary to tweak these; if the device
731 * doesn't respond to a config rom read within 10 seconds, it's not
732 * going to respond at all. As for the initial delay, a lot of
733 * devices will be able to respond within half a second after bus
734 * reset. On the other hand, it's not really worth being more
735 * aggressive than that, since it scales pretty well; if 10 devices
736 * are plugged in, they're all getting read within one second.
737 */
738
739 #define MAX_RETRIES 10
740 #define RETRY_DELAY (3 * HZ)
741 #define INITIAL_DELAY (HZ / 2)
742 #define SHUTDOWN_DELAY (2 * HZ)
743
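/*
 * Illustrative timeline with the values above: the first config ROM read
 * is attempted INITIAL_DELAY (0.5 s) after the bus reset; on failure,
 * fw_device_init() reschedules itself every RETRY_DELAY (3 s) until
 * MAX_RETRIES attempts have been used up, while fw_device_refresh()
 * retries its bus info block recheck with half of both limits.
 */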
744 static void fw_device_shutdown(struct work_struct *work)
745 {
746 struct fw_device *device =
747 container_of(work, struct fw_device, work.work);
748 int minor = MINOR(device->device.devt);
749
750 if (time_before64(get_jiffies_64(),
751 device->card->reset_jiffies + SHUTDOWN_DELAY)
752 && !list_empty(&device->card->link)) {
753 schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
754 return;
755 }
756
757 if (atomic_cmpxchg(&device->state,
758 FW_DEVICE_GONE,
759 FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
760 return;
761
762 fw_device_cdev_remove(device);
763 device_for_each_child(&device->device, NULL, shutdown_unit);
764 device_unregister(&device->device);
765
766 down_write(&fw_device_rwsem);
767 idr_remove(&fw_device_idr, minor);
768 up_write(&fw_device_rwsem);
769
770 fw_device_put(device);
771 }
772
773 static void fw_device_release(struct device *dev)
774 {
775 struct fw_device *device = fw_device(dev);
776 struct fw_card *card = device->card;
777 unsigned long flags;
778
779 /*
780 * Take the card lock so we don't set this to NULL while a
781 * FW_NODE_UPDATED callback is being handled or while the
782 * bus manager work looks at this node.
783 */
784 spin_lock_irqsave(&card->lock, flags);
785 device->node->data = NULL;
786 spin_unlock_irqrestore(&card->lock, flags);
787
788 fw_node_put(device->node);
789 kfree(device->config_rom);
790 kfree(device);
791 fw_card_put(card);
792 }
793
794 static struct device_type fw_device_type = {
795 .release = fw_device_release,
796 };
797
798 static bool is_fw_device(struct device *dev)
799 {
800 return dev->type == &fw_device_type;
801 }
802
803 static int update_unit(struct device *dev, void *data)
804 {
805 struct fw_unit *unit = fw_unit(dev);
806 struct fw_driver *driver = (struct fw_driver *)dev->driver;
807
808 if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
809 device_lock(dev);
810 driver->update(unit);
811 device_unlock(dev);
812 }
813
814 return 0;
815 }
816
817 static void fw_device_update(struct work_struct *work)
818 {
819 struct fw_device *device =
820 container_of(work, struct fw_device, work.work);
821
822 fw_device_cdev_update(device);
823 device_for_each_child(&device->device, NULL, update_unit);
824 }
825
826 /*
827 * If a device was pending for deletion because its node went away but its
828 * bus info block and root directory header matches that of a newly discovered
829 * device, revive the existing fw_device.
830 * The newly allocated fw_device becomes obsolete instead.
831 */
832 static int lookup_existing_device(struct device *dev, void *data)
833 {
834 struct fw_device *old = fw_device(dev);
835 struct fw_device *new = data;
836 struct fw_card *card = new->card;
837 int match = 0;
838
839 if (!is_fw_device(dev))
840 return 0;
841
842 down_read(&fw_device_rwsem); /* serialize config_rom access */
843 spin_lock_irq(&card->lock); /* serialize node access */
844
845 if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
846 atomic_cmpxchg(&old->state,
847 FW_DEVICE_GONE,
848 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
849 struct fw_node *current_node = new->node;
850 struct fw_node *obsolete_node = old->node;
851
852 new->node = obsolete_node;
853 new->node->data = new;
854 old->node = current_node;
855 old->node->data = old;
856
857 old->max_speed = new->max_speed;
858 old->node_id = current_node->node_id;
859 smp_wmb(); /* update node_id before generation */
860 old->generation = card->generation;
861 old->config_rom_retries = 0;
862 fw_notify("rediscovered device %s\n", dev_name(dev));
863
864 PREPARE_DELAYED_WORK(&old->work, fw_device_update);
865 schedule_delayed_work(&old->work, 0);
866
867 if (current_node == card->root_node)
868 fw_schedule_bm_work(card, 0);
869
870 match = 1;
871 }
872
873 spin_unlock_irq(&card->lock);
874 up_read(&fw_device_rwsem);
875
876 return match;
877 }
878
879 enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
880
881 static void set_broadcast_channel(struct fw_device *device, int generation)
882 {
883 struct fw_card *card = device->card;
884 __be32 data;
885 int rcode;
886
887 if (!card->broadcast_channel_allocated)
888 return;
889
890 /*
891 * The Broadcast_Channel Valid bit is required by nodes which want to
892 * transmit on this channel. Such transmissions are practically
893 * exclusive to IP over 1394 (RFC 2734). IP capable nodes are required
894 * to be IRM capable and have a max_rec of 8 or more. We use this fact
895 * to narrow down the nodes to which we send Broadcast_Channel updates.
896 */
897 if (!device->irmc || device->max_rec < 8)
898 return;
899
900 /*
901 * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
902 * Perform a read test first.
903 */
904 if (device->bc_implemented == BC_UNKNOWN) {
905 rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
906 device->node_id, generation, device->max_speed,
907 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
908 &data, 4);
909 switch (rcode) {
910 case RCODE_COMPLETE:
911 if (data & cpu_to_be32(1 << 31)) {
912 device->bc_implemented = BC_IMPLEMENTED;
913 break;
914 }
915 /* else fall through to the address error case */
916 case RCODE_ADDRESS_ERROR:
917 device->bc_implemented = BC_UNIMPLEMENTED;
918 }
919 }
920
921 if (device->bc_implemented == BC_IMPLEMENTED) {
922 data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
923 BROADCAST_CHANNEL_VALID);
924 fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
925 device->node_id, generation, device->max_speed,
926 CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
927 &data, 4);
928 }
929 }
930
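/*
 * Rough summary of the BROADCAST_CHANNEL register layout assumed above
 * (per 1394a-2000): the most significant bit reads back as one when the
 * register is implemented, which is what the read probe tests; the
 * BROADCAST_CHANNEL_VALID flag written below marks the channel as usable;
 * and the low bits carry the channel number, 31 by convention.
 */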
931 int fw_device_set_broadcast_channel(struct device *dev, void *gen)
932 {
933 if (is_fw_device(dev))
934 set_broadcast_channel(fw_device(dev), (long)gen);
935
936 return 0;
937 }
938
939 static void fw_device_init(struct work_struct *work)
940 {
941 struct fw_device *device =
942 container_of(work, struct fw_device, work.work);
943 struct device *revived_dev;
944 int minor, ret;
945
946 /*
947 * All failure paths here set node->data to NULL, so that we
948 * don't try to do device_for_each_child() on a kfree()'d
949 * device.
950 */
951
952 if (read_config_rom(device, device->generation) < 0) {
953 if (device->config_rom_retries < MAX_RETRIES &&
954 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
955 device->config_rom_retries++;
956 schedule_delayed_work(&device->work, RETRY_DELAY);
957 } else {
958 if (device->node->link_on)
959 fw_notify("giving up on config rom for node id %x\n",
960 device->node_id);
961 if (device->node == device->card->root_node)
962 fw_schedule_bm_work(device->card, 0);
963 fw_device_release(&device->device);
964 }
965 return;
966 }
967
968 revived_dev = device_find_child(device->card->device,
969 device, lookup_existing_device);
970 if (revived_dev) {
971 put_device(revived_dev);
972 fw_device_release(&device->device);
973
974 return;
975 }
976
977 device_initialize(&device->device);
978
979 fw_device_get(device);
980 down_write(&fw_device_rwsem);
981 ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
982 idr_get_new(&fw_device_idr, device, &minor) :
983 -ENOMEM;
984 up_write(&fw_device_rwsem);
985
986 if (ret < 0)
987 goto error;
988
989 device->device.bus = &fw_bus_type;
990 device->device.type = &fw_device_type;
991 device->device.parent = device->card->device;
992 device->device.devt = MKDEV(fw_cdev_major, minor);
993 dev_set_name(&device->device, "fw%d", minor);
994
995 BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
996 ARRAY_SIZE(fw_device_attributes) +
997 ARRAY_SIZE(config_rom_attributes));
998 init_fw_attribute_group(&device->device,
999 fw_device_attributes,
1000 &device->attribute_group);
1001
1002 if (device_add(&device->device)) {
1003 fw_error("Failed to add device.\n");
1004 goto error_with_cdev;
1005 }
1006
1007 create_units(device);
1008
1009 /*
1010 * Transition the device to running state. If it got pulled
1011 * out from under us while we did the initialization work, we
1012 * have to shut down the device again here. Normally, though,
1013 * fw_node_event will be responsible for shutting it down when
1014 * necessary. We have to use the atomic cmpxchg here to avoid
1015 * racing with the FW_NODE_DESTROYED case in
1016 * fw_node_event().
1017 */
1018 if (atomic_cmpxchg(&device->state,
1019 FW_DEVICE_INITIALIZING,
1020 FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
1021 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
1022 schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
1023 } else {
1024 if (device->config_rom_retries)
1025 fw_notify("created device %s: GUID %08x%08x, S%d00, "
1026 "%d config ROM retries\n",
1027 dev_name(&device->device),
1028 device->config_rom[3], device->config_rom[4],
1029 1 << device->max_speed,
1030 device->config_rom_retries);
1031 else
1032 fw_notify("created device %s: GUID %08x%08x, S%d00\n",
1033 dev_name(&device->device),
1034 device->config_rom[3], device->config_rom[4],
1035 1 << device->max_speed);
1036 device->config_rom_retries = 0;
1037
1038 set_broadcast_channel(device, device->generation);
1039 }
1040
1041 /*
1042 * Reschedule the IRM work if we just finished reading the
1043 * root node config rom. If this races with a bus reset we
1044 * just end up running the IRM work a couple of extra times -
1045 * pretty harmless.
1046 */
1047 if (device->node == device->card->root_node)
1048 fw_schedule_bm_work(device->card, 0);
1049
1050 return;
1051
1052 error_with_cdev:
1053 down_write(&fw_device_rwsem);
1054 idr_remove(&fw_device_idr, minor);
1055 up_write(&fw_device_rwsem);
1056 error:
1057 fw_device_put(device); /* fw_device_idr's reference */
1058
1059 put_device(&device->device); /* our reference */
1060 }
1061
1062 enum {
1063 REREAD_BIB_ERROR,
1064 REREAD_BIB_GONE,
1065 REREAD_BIB_UNCHANGED,
1066 REREAD_BIB_CHANGED,
1067 };
1068
1069 /* Reread and compare bus info block and header of root directory */
1070 static int reread_config_rom(struct fw_device *device, int generation)
1071 {
1072 u32 q;
1073 int i;
1074
1075 for (i = 0; i < 6; i++) {
1076 if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
1077 return REREAD_BIB_ERROR;
1078
1079 if (i == 0 && q == 0)
1080 return REREAD_BIB_GONE;
1081
1082 if (q != device->config_rom[i])
1083 return REREAD_BIB_CHANGED;
1084 }
1085
1086 return REREAD_BIB_UNCHANGED;
1087 }
1088
1089 static void fw_device_refresh(struct work_struct *work)
1090 {
1091 struct fw_device *device =
1092 container_of(work, struct fw_device, work.work);
1093 struct fw_card *card = device->card;
1094 int node_id = device->node_id;
1095
1096 switch (reread_config_rom(device, device->generation)) {
1097 case REREAD_BIB_ERROR:
1098 if (device->config_rom_retries < MAX_RETRIES / 2 &&
1099 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
1100 device->config_rom_retries++;
1101 schedule_delayed_work(&device->work, RETRY_DELAY / 2);
1102
1103 return;
1104 }
1105 goto give_up;
1106
1107 case REREAD_BIB_GONE:
1108 goto gone;
1109
1110 case REREAD_BIB_UNCHANGED:
1111 if (atomic_cmpxchg(&device->state,
1112 FW_DEVICE_INITIALIZING,
1113 FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
1114 goto gone;
1115
1116 fw_device_update(work);
1117 device->config_rom_retries = 0;
1118 goto out;
1119
1120 case REREAD_BIB_CHANGED:
1121 break;
1122 }
1123
1124 /*
1125 * Something changed. We keep things simple and don't investigate
1126 * further. We just destroy all previous units and create new ones.
1127 */
1128 device_for_each_child(&device->device, NULL, shutdown_unit);
1129
1130 if (read_config_rom(device, device->generation) < 0) {
1131 if (device->config_rom_retries < MAX_RETRIES &&
1132 atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
1133 device->config_rom_retries++;
1134 schedule_delayed_work(&device->work, RETRY_DELAY);
1135
1136 return;
1137 }
1138 goto give_up;
1139 }
1140
1141 fw_device_cdev_update(device);
1142 create_units(device);
1143
1144 /* Userspace may want to re-read attributes. */
1145 kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
1146
1147 if (atomic_cmpxchg(&device->state,
1148 FW_DEVICE_INITIALIZING,
1149 FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
1150 goto gone;
1151
1152 fw_notify("refreshed device %s\n", dev_name(&device->device));
1153 device->config_rom_retries = 0;
1154 goto out;
1155
1156 give_up:
1157 fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
1158 gone:
1159 atomic_set(&device->state, FW_DEVICE_GONE);
1160 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
1161 schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
1162 out:
1163 if (node_id == card->root_node->node_id)
1164 fw_schedule_bm_work(card, 0);
1165 }
1166
1167 void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
1168 {
1169 struct fw_device *device;
1170
1171 switch (event) {
1172 case FW_NODE_CREATED:
1173 /*
1174 * Attempt to scan the node, regardless of whether its self ID has
1175 * the L (link active) flag set or not. Some broken devices
1176 * send L=0 but have an up-and-running link; others send L=1
1177 * without actually having a link.
1178 */
1179 create:
1180 device = kzalloc(sizeof(*device), GFP_ATOMIC);
1181 if (device == NULL)
1182 break;
1183
1184 /*
1185 * Do minimal initialization of the device here; the
1186 * rest will happen in fw_device_init().
1187 *
1188 * Attention: A lot of things, even fw_device_get(),
1189 * cannot be done before fw_device_init() has finished!
1190 * You can basically just check device->state and
1191 * schedule work until then, but only while holding
1192 * card->lock.
1193 */
1194 atomic_set(&device->state, FW_DEVICE_INITIALIZING);
1195 device->card = fw_card_get(card);
1196 device->node = fw_node_get(node);
1197 device->node_id = node->node_id;
1198 device->generation = card->generation;
1199 device->is_local = node == card->local_node;
1200 mutex_init(&device->client_list_mutex);
1201 INIT_LIST_HEAD(&device->client_list);
1202
1203 /*
1204 * Set the node data to point back to this device so
1205 * FW_NODE_UPDATED callbacks can update the node_id
1206 * and generation for the device.
1207 */
1208 node->data = device;
1209
1210 /*
1211 * Many devices are slow to respond after bus resets,
1212 * especially if they are bus powered and go through
1213 * power-up after getting plugged in. We schedule the
1214 * first config rom scan half a second after bus reset.
1215 */
1216 INIT_DELAYED_WORK(&device->work, fw_device_init);
1217 schedule_delayed_work(&device->work, INITIAL_DELAY);
1218 break;
1219
1220 case FW_NODE_INITIATED_RESET:
1221 case FW_NODE_LINK_ON:
1222 device = node->data;
1223 if (device == NULL)
1224 goto create;
1225
1226 device->node_id = node->node_id;
1227 smp_wmb(); /* update node_id before generation */
1228 device->generation = card->generation;
1229 if (atomic_cmpxchg(&device->state,
1230 FW_DEVICE_RUNNING,
1231 FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
1232 PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
1233 schedule_delayed_work(&device->work,
1234 device->is_local ? 0 : INITIAL_DELAY);
1235 }
1236 break;
1237
1238 case FW_NODE_UPDATED:
1239 device = node->data;
1240 if (device == NULL)
1241 break;
1242
1243 device->node_id = node->node_id;
1244 smp_wmb(); /* update node_id before generation */
1245 device->generation = card->generation;
1246 if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
1247 PREPARE_DELAYED_WORK(&device->work, fw_device_update);
1248 schedule_delayed_work(&device->work, 0);
1249 }
1250 break;
1251
1252 case FW_NODE_DESTROYED:
1253 case FW_NODE_LINK_OFF:
1254 if (!node->data)
1255 break;
1256
1257 /*
1258 * Destroy the device associated with the node. There
1259 * are two cases here: either the device is fully
1260 * initialized (FW_DEVICE_RUNNING) or we're in the
1261 * process of reading its config rom
1262 * (FW_DEVICE_INITIALIZING). If it is fully
1263 * initialized we can reuse device->work to schedule a
1264 * full fw_device_shutdown(). If not, there's work
1265 * scheduled to read its config rom, and we just put
1266 * the device in shutdown state to have that code fail
1267 * to create the device.
1268 */
1269 device = node->data;
1270 if (atomic_xchg(&device->state,
1271 FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
1272 PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
1273 schedule_delayed_work(&device->work,
1274 list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
1275 }
1276 break;
1277 }
1278 }
1279
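/*
 * Summary of the fw_device.state transitions driven by the code above,
 * guarded by atomic operations to close races between the delayed work
 * items and fw_node_event():
 *
 *	INITIALIZING -> RUNNING       fw_device_init() / fw_device_refresh()
 *	RUNNING      -> INITIALIZING  FW_NODE_INITIATED_RESET / FW_NODE_LINK_ON
 *	any state    -> GONE          FW_NODE_DESTROYED / FW_NODE_LINK_OFF,
 *	                              or a failed refresh
 *	GONE         -> SHUTDOWN      fw_device_shutdown(), which also removes
 *	                              the character device and the unit children
 */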