core-topology.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

#include <asm/byteorder.h>

#include "core.h"
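
/*
 * Added note: the macros below pick individual fields out of the (extended)
 * self-ID quadlets that every PHY broadcasts after a bus reset; the
 * SELFID_PORT_* values are the two-bit port status codes carried in those
 * packets.
 */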
#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
        u32 q;
        int port_type, shift, seq;

        *total_port_count = 0;
        *child_port_count = 0;

        shift = 6;
        q = *sid;
        seq = 0;

        while (1) {
                port_type = (q >> shift) & 0x03;
                switch (port_type) {
                case SELFID_PORT_CHILD:
                        (*child_port_count)++;
                        fallthrough;
                case SELFID_PORT_PARENT:
                case SELFID_PORT_NCONN:
                        (*total_port_count)++;
                        fallthrough;
                case SELFID_PORT_NONE:
                        break;
                }

                shift -= 2;
                if (shift == 0) {
                        if (!SELF_ID_MORE_PACKETS(q))
                                return sid + 1;

                        shift = 16;
                        sid++;
                        q = *sid;

                        /*
                         * Check that the extra packets actually are
                         * extended self ID packets and that the
                         * sequence numbers in the extended self ID
                         * packets increase as expected.
                         */
                        if (!SELF_ID_EXTENDED(q) ||
                            seq != SELF_ID_EXT_SEQUENCE(q))
                                return NULL;

                        seq++;
                }
        }
}
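
/*
 * Added note, a sketch of the port layout assumed here: ports 0-2 occupy two
 * bits each in the first self-ID quadlet, ending at bit 2, and each extended
 * self-ID quadlet carries eight further ports starting at bit 16.  The
 * "port_index + 5" arithmetic below maps a port number onto the right quadlet
 * and bit position within that layout.
 */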
static int get_port_type(u32 *sid, int port_index)
{
        int index, shift;

        index = (port_index + 5) / 8;
        shift = 16 - ((port_index + 5) & 7) * 2;
        return (sid[index] >> shift) & 0x03;
}
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
        struct fw_node *node;

        node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
        if (node == NULL)
                return NULL;

        node->color = color;
        node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
        node->link_on = SELF_ID_LINK_ON(sid);
        node->phy_speed = SELF_ID_PHY_SPEED(sid);
        node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
        node->port_count = port_count;

        refcount_set(&node->ref_count, 1);
        INIT_LIST_HEAD(&node->link);

        return node;
}
/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of hops
 * to the furthest leaf.  Computing the max hop count breaks down into
 * two cases: either the path goes through this node, in which case
 * the hop count is the sum of the two biggest child depths plus 2,
 * or the max hop path is entirely contained in a child tree, in which
 * case the max hop count is just the max hop count of that child.
 */
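/*
 * Added worked example (hypothetical topology): if a node's two deepest
 * child subtrees have max_depth 2 and 1, and no single child subtree has
 * more than 4 hops internally, the node ends up with max_depth = 2 + 1 = 3
 * and max_hops = max(4, 2 + 1 + 2) = 5.
 */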
static void update_hop_count(struct fw_node *node)
{
        int depths[2] = { -1, -1 };
        int max_child_hops = 0;
        int i;

        for (i = 0; i < node->port_count; i++) {
                if (node->ports[i] == NULL)
                        continue;

                if (node->ports[i]->max_hops > max_child_hops)
                        max_child_hops = node->ports[i]->max_hops;

                if (node->ports[i]->max_depth > depths[0]) {
                        depths[1] = depths[0];
                        depths[0] = node->ports[i]->max_depth;
                } else if (node->ports[i]->max_depth > depths[1])
                        depths[1] = node->ports[i]->max_depth;
        }

        node->max_depth = depths[0] + 1;
        node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}

static inline struct fw_node *fw_node(struct list_head *l)
{
        return list_entry(l, struct fw_node, link);
}
/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card; otherwise it returns NULL.
 */
static struct fw_node *build_tree(struct fw_card *card,
                                  u32 *sid, int self_id_count)
{
        struct fw_node *node, *child, *local_node, *irm_node;
        struct list_head stack, *h;
        u32 *next_sid, *end, q;
        int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
        int gap_count;
        bool beta_repeaters_present;

        local_node = NULL;
        node = NULL;
        INIT_LIST_HEAD(&stack);
        stack_depth = 0;
        end = sid + self_id_count;
        phy_id = 0;
        irm_node = NULL;
        gap_count = SELF_ID_GAP_COUNT(*sid);
        beta_repeaters_present = false;
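
        /*
         * Added note: self-ID packets arrive in ascending PHY ID order, and
         * a node's children are announced before the node itself, so the
         * tree can be built bottom up with a simple stack of subtrees.
         */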
        while (sid < end) {
                next_sid = count_ports(sid, &port_count, &child_port_count);

                if (next_sid == NULL) {
                        fw_err(card, "inconsistent extended self IDs\n");
                        return NULL;
                }

                q = *sid;
                if (phy_id != SELF_ID_PHY_ID(q)) {
                        fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
                               phy_id, SELF_ID_PHY_ID(q));
                        return NULL;
                }

                if (child_port_count > stack_depth) {
                        fw_err(card, "topology stack underflow\n");
                        return NULL;
                }

                /*
                 * Seek back from the top of our stack to find the
                 * start of the child nodes for this node.
                 */
                for (i = 0, h = &stack; i < child_port_count; i++)
                        h = h->prev;
                /*
                 * When the stack is empty, this yields an invalid value,
                 * but that pointer will never be dereferenced.
                 */
                child = fw_node(h);

                node = fw_node_create(q, port_count, card->color);
                if (node == NULL) {
                        fw_err(card, "out of memory while building topology\n");
                        return NULL;
                }

                if (phy_id == (card->node_id & 0x3f))
                        local_node = node;

                if (SELF_ID_CONTENDER(q))
                        irm_node = node;

                parent_count = 0;

                for (i = 0; i < port_count; i++) {
                        switch (get_port_type(sid, i)) {
                        case SELFID_PORT_PARENT:
                                /*
                                 * Who's your daddy?  We don't know the
                                 * parent node at this time, so we
                                 * temporarily abuse node->color for
                                 * remembering the entry in the
                                 * node->ports array where the parent
                                 * node should be.  Later, when we
                                 * handle the parent node, we fix up
                                 * the reference.
                                 */
                                parent_count++;
                                node->color = i;
                                break;

                        case SELFID_PORT_CHILD:
                                node->ports[i] = child;
                                /*
                                 * Fix up parent reference for this
                                 * child node.
                                 */
                                child->ports[child->color] = node;
                                child->color = card->color;
                                child = fw_node(child->link.next);
                                break;
                        }
                }

                /*
                 * Check that the node reports exactly one parent
                 * port, except for the root, which of course should
                 * have no parents.
                 */
                if ((next_sid == end && parent_count != 0) ||
                    (next_sid < end && parent_count != 1)) {
                        fw_err(card, "parent port inconsistency for node %d: "
                               "parent_count=%d\n", phy_id, parent_count);
                        return NULL;
                }

                /* Pop the child nodes off the stack and push the new node. */
                __list_del(h->prev, &stack);
                list_add_tail(&node->link, &stack);
                stack_depth += 1 - child_port_count;

                if (node->phy_speed == SCODE_BETA &&
                    parent_count + child_port_count > 1)
                        beta_repeaters_present = true;

                /*
                 * If PHYs report different gap counts, set an invalid count
                 * which will force a gap count reconfiguration and a reset.
                 */
                if (SELF_ID_GAP_COUNT(q) != gap_count)
                        gap_count = 0;

                update_hop_count(node);

                sid = next_sid;
                phy_id++;
        }

        card->root_node = node;
        card->irm_node = irm_node;
        card->gap_count = gap_count;
        card->beta_repeaters_present = beta_repeaters_present;

        return local_node;
}
typedef void (*fw_node_callback_t)(struct fw_card *card,
                                   struct fw_node *node,
                                   struct fw_node *parent);
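
/*
 * Added note: the helper below walks the subtree under root breadth first
 * and invokes callback on every node together with its parent.  The card's
 * current color marks nodes that have already been queued, so following a
 * port back towards the parent does not re-add that node to the list.
 */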
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
                             fw_node_callback_t callback)
{
        struct list_head list;
        struct fw_node *node, *next, *child, *parent;
        int i;

        INIT_LIST_HEAD(&list);

        fw_node_get(root);
        list_add_tail(&root->link, &list);
        parent = NULL;
        list_for_each_entry(node, &list, link) {
                node->color = card->color;

                for (i = 0; i < node->port_count; i++) {
                        child = node->ports[i];
                        if (!child)
                                continue;
                        if (child->color == card->color) {
                                parent = child;
                        } else {
                                fw_node_get(child);
                                list_add_tail(&child->link, &list);
                        }
                }

                callback(card, node, parent);
        }

        list_for_each_entry_safe(node, next, &list, link)
                fw_node_put(node);
}
static void report_lost_node(struct fw_card *card,
                             struct fw_node *node, struct fw_node *parent)
{
        fw_node_event(card, node, FW_NODE_DESTROYED);
        fw_node_put(node);

        /* Topology has changed - reset bus manager retry counter */
        card->bm_retries = 0;
}

static void report_found_node(struct fw_card *card,
                              struct fw_node *node, struct fw_node *parent)
{
        int b_path = (node->phy_speed == SCODE_BETA);

        if (parent != NULL) {
                /* min() macro doesn't work here with gcc 3.4 */
                node->max_speed = parent->max_speed < node->phy_speed ?
                        parent->max_speed : node->phy_speed;
                node->b_path = parent->b_path && b_path;
        } else {
                node->max_speed = node->phy_speed;
                node->b_path = b_path;
        }

        fw_node_event(card, node, FW_NODE_CREATED);

        /* Topology has changed - reset bus manager retry counter */
        card->bm_retries = 0;
}
void fw_destroy_nodes(struct fw_card *card)
{
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);
        card->color++;
        if (card->local_node != NULL)
                for_each_fw_node(card, card->local_node, report_lost_node);
        card->local_node = NULL;
        spin_unlock_irqrestore(&card->lock, flags);
}

static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
        struct fw_node *tree;
        int i;

        tree = node1->ports[port];
        node0->ports[port] = tree;
        for (i = 0; i < tree->port_count; i++) {
                if (tree->ports[i] == node1) {
                        tree->ports[i] = node0;
                        break;
                }
        }
}
/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
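/*
 * Added note: the old and the new tree are walked in lock step, with list0
 * holding nodes of the existing topology and list1 the freshly built nodes,
 * so each node0/node1 pair is assumed to refer to the same physical node.
 */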
static void update_tree(struct fw_card *card, struct fw_node *root)
{
        struct list_head list0, list1;
        struct fw_node *node0, *node1, *next1;
        int i, event;

        INIT_LIST_HEAD(&list0);
        list_add_tail(&card->local_node->link, &list0);
        INIT_LIST_HEAD(&list1);
        list_add_tail(&root->link, &list1);

        node0 = fw_node(list0.next);
        node1 = fw_node(list1.next);

        while (&node0->link != &list0) {
                WARN_ON(node0->port_count != node1->port_count);

                if (node0->link_on && !node1->link_on)
                        event = FW_NODE_LINK_OFF;
                else if (!node0->link_on && node1->link_on)
                        event = FW_NODE_LINK_ON;
                else if (node1->initiated_reset && node1->link_on)
                        event = FW_NODE_INITIATED_RESET;
                else
                        event = FW_NODE_UPDATED;

                node0->node_id = node1->node_id;
                node0->color = card->color;
                node0->link_on = node1->link_on;
                node0->initiated_reset = node1->initiated_reset;
                node0->max_hops = node1->max_hops;
                node1->color = card->color;
                fw_node_event(card, node0, event);

                if (card->root_node == node1)
                        card->root_node = node0;
                if (card->irm_node == node1)
                        card->irm_node = node0;

                for (i = 0; i < node0->port_count; i++) {
                        if (node0->ports[i] && node1->ports[i]) {
                                /*
                                 * This port didn't change, queue the
                                 * connected node for further
                                 * investigation.
                                 */
                                if (node0->ports[i]->color == card->color)
                                        continue;
                                list_add_tail(&node0->ports[i]->link, &list0);
                                list_add_tail(&node1->ports[i]->link, &list1);
                        } else if (node0->ports[i]) {
                                /*
                                 * The nodes connected here were
                                 * unplugged; unref the lost nodes and
                                 * queue FW_NODE_LOST callbacks for
                                 * them.
                                 */
                                for_each_fw_node(card, node0->ports[i],
                                                 report_lost_node);
                                node0->ports[i] = NULL;
                        } else if (node1->ports[i]) {
                                /*
                                 * One or more nodes were connected to
                                 * this port.  Move the new nodes into
                                 * the tree and queue FW_NODE_CREATED
                                 * callbacks for them.
                                 */
                                move_tree(node0, node1, i);
                                for_each_fw_node(card, node0->ports[i],
                                                 report_found_node);
                        }
                }

                node0 = fw_node(node0->link.next);
                next1 = fw_node(node1->link.next);
                fw_node_put(node1);
                node1 = next1;
        }
}
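
/*
 * Added note: the map written below holds, in order, a length field, a
 * generation counter bumped on every update, the node and self-ID counts,
 * and the raw self-ID quadlets; fw_compute_block_crc() then fills in the
 * CRC for the block.  This roughly matches the TOPOLOGY_MAP layout that
 * the CSR space exposes to other bus nodes.
 */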
static void update_topology_map(struct fw_card *card,
                                u32 *self_ids, int self_id_count)
{
        int node_count = (card->root_node->node_id & 0x3f) + 1;
        __be32 *map = card->topology_map;

        *map++ = cpu_to_be32((self_id_count + 2) << 16);
        *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
        *map++ = cpu_to_be32((node_count << 16) | self_id_count);

        while (self_id_count--)
                *map++ = cpu_to_be32p(self_ids++);

        fw_compute_block_crc(card->topology_map);
}
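
/*
 * Added note: this is the entry point called by the low-level driver after
 * a bus reset.  It tears down the old topology if this reset does not
 * directly follow the previously processed generation, records the new node
 * ID and generation, rebuilds the node tree from the self IDs, and diffs it
 * against the previous tree so that FW_NODE_* events are emitted only for
 * what actually changed.
 */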
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
                              int self_id_count, u32 *self_ids, bool bm_abdicate)
{
        struct fw_node *local_node;
        unsigned long flags;

        /*
         * If the selfID buffer is not the immediate successor of the
         * previously processed one, we cannot reliably compare the
         * old and new topologies.
         */
        if (!is_next_generation(generation, card->generation) &&
            card->local_node != NULL) {
                fw_destroy_nodes(card);
                card->bm_retries = 0;
        }

        spin_lock_irqsave(&card->lock, flags);

        card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
        card->node_id = node_id;
        /*
         * Update node_id before generation to prevent anybody from using
         * a stale node_id together with a current generation.
         */
        smp_wmb();
        card->generation = generation;
        card->reset_jiffies = get_jiffies_64();
        card->bm_node_id = 0xffff;
        card->bm_abdicate = bm_abdicate;
        fw_schedule_bm_work(card, 0);

        local_node = build_tree(card, self_ids, self_id_count);

        update_topology_map(card, self_ids, self_id_count);

        card->color++;

        if (local_node == NULL) {
                fw_err(card, "topology build failed\n");
                /* FIXME: We need to issue a bus reset in this case. */
        } else if (card->local_node == NULL) {
                card->local_node = local_node;
                for_each_fw_node(card, local_node, report_found_node);
        } else {
                update_tree(card, local_node);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);