/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "link.h"
#include "name_table.h"

#define BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define BCLINK_WIN_MIN		32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";

unsigned long sysctl_tipc_bc_retruni __read_mostly;

/**
 * struct tipc_bc_base - base structure for keeping broadcast send state
 * @link: broadcast send link structure
 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 * @dests: array keeping number of reachable destinations per bearer
 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 * @force_bcast: forces broadcast for multicast traffic
 * @rcast_support: indicates if all peer nodes support replicast
 * @force_rcast: forces replicast for multicast traffic
 * @rc_ratio: dest count as percentage of cluster size where send method changes
 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct sk_buff_head inputq;
	int dests[MAX_BEARERS];
	int primary_bearer;
	bool bcast_support;
	bool force_bcast;
	bool rcast_support;
	bool force_rcast;
	int rc_ratio;
	int bc_threshold;
};

static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}

/* tipc_bcast_get_mtu(): get the MTU currently used by the broadcast link
 * Note: the MTU is decremented to give room for a tunnel header, in
 * case the message needs to be sent as replicast
 */
int tipc_bcast_get_mtu(struct net *net)
{
	return tipc_link_mss(tipc_bc_sndlink(net));
}

void tipc_bcast_toggle_rcast(struct net *net, bool supp)
{
	tipc_bc_base(net)->rcast_support = supp;
}

static void tipc_bcbase_calc_bc_threshold(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));

	bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
}
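
/* Worked example (illustrative only): with the default rc_ratio of 10
 * and a cluster of 30 reachable peers, the threshold becomes
 * 1 + (30 * 10 / 100) = 4. A multicast to more than 4 destinations is
 * then sent as true broadcast; 4 or fewer destinations go as replicast
 * (see tipc_bcast_select_xmit_method() below).
 */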

/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
 * if any, and make it primary bearer
 */
static void tipc_bcbase_select_primary(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int all_dests = tipc_link_bc_peers(bb->link);
	int max_win = tipc_link_max_win(bb->link);
	int min_win = tipc_link_min_win(bb->link);
	int i, mtu, prim;

	bb->primary_bearer = INVALID_BEARER_ID;
	bb->bcast_support = true;

	if (!all_dests)
		return;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!bb->dests[i])
			continue;

		mtu = tipc_bearer_mtu(net, i);
		if (mtu < tipc_link_mtu(bb->link)) {
			tipc_link_set_mtu(bb->link, mtu);
			tipc_link_set_queue_limits(bb->link,
						   min_win,
						   max_win);
		}
		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
		if (bb->dests[i] < all_dests)
			continue;

		bb->primary_bearer = i;

		/* Reduce risk that all nodes select same primary */
		if ((i ^ tipc_own_addr(net)) & 1)
			break;
	}
	prim = bb->primary_bearer;
	if (prim != INVALID_BEARER_ID)
		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
}
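
/* Note on the parity check above (illustrative): the loop keeps the
 * last qualifying bearer unless it finds one whose index has the
 * opposite low bit to the node's own address. E.g. with two fully
 * connected bearers 0 and 1, nodes with odd addresses break at
 * bearer 0 while nodes with even addresses continue and settle on
 * bearer 1, spreading the broadcast load across both bearers.
 */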

void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]++;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]--;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}

/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 *
 * Note that the number of reachable destinations, as indicated in the dests[]
 * array, may transitionally differ from the number of destinations indicated
 * in each sent buffer. We can sustain this. Excess destination nodes will
 * drop and never acknowledge the unexpected packets, and missing destinations
 * will either require retransmission (if they are just about to be added to
 * the bearer), or be removed from the buffer's 'ackers' counter (if they
 * just went down)
 */
static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	int bearer_id;
	struct tipc_bc_base *bb = tipc_bc_base(net);
	struct sk_buff *skb, *_skb;
	struct sk_buff_head _xmitq;

	if (skb_queue_empty(xmitq))
		return;

	/* The typical case: at least one bearer has links to all nodes */
	bearer_id = bb->primary_bearer;
	if (bearer_id >= 0) {
		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
		return;
	}

	/* We have to transmit across all bearers */
	__skb_queue_head_init(&_xmitq);
	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		if (!bb->dests[bearer_id])
			continue;

		skb_queue_walk(xmitq, skb) {
			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!_skb)
				break;
			__skb_queue_tail(&_xmitq, _skb);
		}
		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
	}
	__skb_queue_purge(xmitq);
	__skb_queue_purge(&_xmitq);
}

static void tipc_bcast_select_xmit_method(struct net *net, int dests,
					  struct tipc_mc_method *method)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	unsigned long exp = method->expires;

	/* Broadcast supported by used bearer/bearers? */
	if (!bb->bcast_support) {
		method->rcast = true;
		return;
	}
	/* Any destinations which don't support replicast? */
	if (!bb->rcast_support) {
		method->rcast = false;
		return;
	}
	/* Can current method be changed? */
	method->expires = jiffies + TIPC_METHOD_EXPIRE;
	if (method->mandatory)
		return;

	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) &&
	    time_before(jiffies, exp))
		return;

	/* Configured to force the 'broadcast' method */
	if (bb->force_bcast) {
		method->rcast = false;
		return;
	}
	/* Configured to force the 'replicast' method */
	if (bb->force_rcast) {
		method->rcast = true;
		return;
	}
	/* 'Autoselect' or default: determine method from destination count */
	method->rcast = dests <= bb->bc_threshold;
}
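
/* Decision order above, summarized (illustrative): bearer and peer
 * capabilities win first (no broadcast support => replicast; some peer
 * lacking replicast support => broadcast), then any explicitly
 * configured BCAST/RCAST mode, and only then the autoselect threshold.
 * For example, with bc_threshold = 4, a message to 3 destinations is
 * replicast as unicast copies, while one to 10 destinations goes out
 * as a single broadcast.
 */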

/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
 * Consumes the buffer chain.
 * Returns 0 on success, otherwise errno: -EHOSTUNREACH, -EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    u16 *cong_link_cnt)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);
	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, pkts, &xmitq);
	tipc_bcast_unlock(net);
	tipc_bcbase_xmit(net, &xmitq);
	__skb_queue_purge(pkts);
	if (rc == -ELINKCONG) {
		*cong_link_cnt = 1;
		rc = 0;
	}
	return rc;
}
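
/* Note (illustrative): -ELINKCONG is deliberately swallowed here and
 * reported through *cong_link_cnt instead, so congestion is not a send
 * failure. A caller such as the socket layer can then block or retry,
 * roughly along the lines of:
 *
 *	rc = tipc_bcast_xmit(net, &pkts, &cong_links);
 *	if (!rc && cong_links)
 *		... wait until the broadcast link decongests, then retry ...
 */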

/* tipc_rcast_xmit - replicate and send a message to given destination nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @dests: list of destination nodes
 * @cong_link_cnt: returns number of congested links
 * Returns 0 on success, otherwise errno
 */
static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   struct tipc_nlist *dests, u16 *cong_link_cnt)
{
	struct tipc_dest *dst, *tmp;
	struct sk_buff_head _pkts;
	u32 dnode, selector;

	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
	__skb_queue_head_init(&_pkts);

	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
		dnode = dst->node;
		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
			return -ENOMEM;

		/* Any other return value than -ELINKCONG is ignored */
		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
			(*cong_link_cnt)++;
	}
	return 0;
}

/* tipc_mcast_send_sync - deliver a dummy message with SYN bit
 * @net: the applicable net namespace
 * @skb: socket buffer to copy
 * @method: send method to be used
 * @dests: destination nodes for message.
 * Returns 0 on success, otherwise errno
 */
static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
				struct tipc_mc_method *method,
				struct tipc_nlist *dests)
{
	struct tipc_msg *hdr, *_hdr;
	struct sk_buff_head tmpq;
	struct sk_buff *_skb;
	u16 cong_link_cnt;
	int rc = 0;

	/* Does the whole cluster support the new capability? */
	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
		return 0;

	hdr = buf_msg(skb);
	if (msg_user(hdr) == MSG_FRAGMENTER)
		hdr = msg_inner_hdr(hdr);
	if (msg_type(hdr) != TIPC_MCAST_MSG)
		return 0;

	/* Allocate dummy message */
	_skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL);
	if (!_skb)
		return -ENOMEM;

	/* Prepare the 'synching' header */
	msg_set_syn(hdr, 1);

	/* Copy skb's header into a dummy header */
	skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE);
	skb_orphan(_skb);

	/* Reverse method for dummy message */
	_hdr = buf_msg(_skb);
	msg_set_size(_hdr, MCAST_H_SIZE);
	msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
	msg_set_errcode(_hdr, TIPC_ERR_NO_PORT);

	__skb_queue_head_init(&tmpq);
	__skb_queue_tail(&tmpq, _skb);
	if (method->rcast)
		rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt);
	else
		rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt);

	/* This queue should normally be empty by now */
	__skb_queue_purge(&tmpq);

	return rc;
}
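
/* Why the dummy travels via the *opposite* method (illustrative): when
 * a sender flips from, say, replicast to broadcast, packets still in
 * flight on the old path may be reordered against the first packets on
 * the new path. The SYN-marked dummy is sent on the old method and acts
 * as a barrier: tipc_mcast_filter_msg() on the receiver defers messages
 * arriving via the new method until the twin SYN has arrived, thereby
 * preserving per-source ordering across the method switch.
 */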

/* tipc_mcast_xmit - deliver message to indicated destination nodes
 * and to identified node local sockets
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @method: send method to be used
 * @dests: destination nodes for message.
 * @cong_link_cnt: returns number of encountered congested destination links
 * Consumes buffer chain.
 * Returns 0 on success, otherwise errno
 */
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    struct tipc_mc_method *method, struct tipc_nlist *dests,
		    u16 *cong_link_cnt)
{
	struct sk_buff_head inputq, localq;
	bool rcast = method->rcast;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	int rc = 0;

	skb_queue_head_init(&inputq);
	__skb_queue_head_init(&localq);

	/* Clone packets before they are consumed by next call */
	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
		rc = -ENOMEM;
		goto exit;
	}
	/* Send according to determined transmit method */
	if (dests->remote) {
		tipc_bcast_select_xmit_method(net, dests->remote, method);

		skb = skb_peek(pkts);
		hdr = buf_msg(skb);
		if (msg_user(hdr) == MSG_FRAGMENTER)
			hdr = msg_inner_hdr(hdr);
		msg_set_is_rcast(hdr, method->rcast);

		/* Switch method? */
		if (rcast != method->rcast) {
			rc = tipc_mcast_send_sync(net, skb, method, dests);
			if (unlikely(rc)) {
				pr_err("Unable to send SYN: method %d, rc %d\n",
				       rcast, rc);
				goto exit;
			}
		}

		if (method->rcast)
			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
		else
			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
	}

	if (dests->local) {
		tipc_loopback_trace(net, &localq);
		tipc_sk_mcast_rcv(net, &localq, &inputq);
	}
exit:
	/* This queue should normally be empty by now */
	__skb_queue_purge(pkts);

	return rc;
}

/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}

/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	u16 acked = msg_bcast_ack(hdr);
	struct sk_buff_head xmitq;

	/* Ignore bc acks sent by peer before bcast synch point was received */
	if (msg_bc_ack_invalid(hdr))
		return;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, 0, NULL, &xmitq, NULL);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr,
			struct sk_buff_head *retrq)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct tipc_gap_ack_blks *ga;
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) != STATE_MSG) {
		tipc_link_bc_init_rcv(l, hdr);
	} else if (!msg_bc_ack_invalid(hdr)) {
		tipc_get_gap_ack_blks(&ga, l, hdr, false);
		if (!sysctl_tipc_bc_retruni)
			retrq = &xmitq;
		rc = tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr),
					  msg_bc_gap(hdr), ga, &xmitq,
					  retrq);
		rc |= tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	}
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}

/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bcast_lock(net);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);
}

/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}

int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l)
{
	if (!l)
		return -ENOPROTOOPT;

	tipc_bcast_lock(net);
	tipc_link_reset_stats(l);
	tipc_bcast_unlock(net);
	return 0;
}

static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
{
	struct tipc_link *l = tipc_bc_sndlink(net);

	if (!l)
		return -ENOPROTOOPT;
	if (max_win < BCLINK_WIN_MIN)
		max_win = BCLINK_WIN_MIN;
	if (max_win > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bcast_lock(net);
	tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
	tipc_bcast_unlock(net);
	return 0;
}

static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	switch (bc_mode) {
	case BCLINK_MODE_BCAST:
		if (!bb->bcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = true;
		bb->force_rcast = false;
		break;
	case BCLINK_MODE_RCAST:
		if (!bb->rcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = false;
		bb->force_rcast = true;
		break;
	case BCLINK_MODE_SEL:
		if (!bb->bcast_support || !bb->rcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = false;
		bb->force_rcast = false;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	if (!bb->bcast_support || !bb->rcast_support)
		return -ENOPROTOOPT;

	if (bc_ratio > 100 || bc_ratio <= 0)
		return -EINVAL;

	bb->rc_ratio = bc_ratio;
	tipc_bcast_lock(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	return 0;
}

int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	u32 bc_mode;
	u32 bc_ratio;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN] &&
	    !props[TIPC_NLA_PROP_BROADCAST] &&
	    !props[TIPC_NLA_PROP_BROADCAST_RATIO])
		return -EOPNOTSUPP;

	if (props[TIPC_NLA_PROP_BROADCAST]) {
		bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]);
		err = tipc_bc_link_set_broadcast_mode(net, bc_mode);
	}

	if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
		bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]);
		err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio);
	}

	if (!err && props[TIPC_NLA_PROP_WIN]) {
		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		err = tipc_bc_link_set_queue_limits(net, win);
	}

	return err;
}
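
/* Userspace note (an assumption, not derived from this file): these
 * link properties are normally reached through the TIPC netlink API,
 * e.g. via the tipc(8) tool from iproute2 with a command roughly like
 *
 *	tipc link set broadcast AUTOSELECT ratio 20
 *
 * which lands here as TIPC_NLA_PROP_BROADCAST and
 * TIPC_NLA_PROP_BROADCAST_RATIO attributes.
 */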

int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	spin_lock_init(&tipc_net(net)->bclock);

	if (!tipc_link_bc_create(net, 0, 0, NULL,
				 one_page_mtu,
				 BCLINK_WIN_DEFAULT,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 NULL,
				 NULL,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	bb->rc_ratio = 10;
	bb->rcast_support = true;
	return 0;
enomem:
	kfree(bb);
	kfree(l);
	return -ENOMEM;
}

void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	synchronize_net();
	kfree(tn->bcbase);
	kfree(tn->bcl);
}

void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
{
	memset(nl, 0, sizeof(*nl));
	INIT_LIST_HEAD(&nl->list);
	nl->self = self;
}

void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
{
	if (node == nl->self)
		nl->local = true;
	else if (tipc_dest_push(&nl->list, node, 0))
		nl->remote++;
}

void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
{
	if (node == nl->self)
		nl->local = false;
	else if (tipc_dest_del(&nl->list, node, 0))
		nl->remote--;
}

void tipc_nlist_purge(struct tipc_nlist *nl)
{
	tipc_dest_list_purge(&nl->list);
	nl->remote = 0;
	nl->local = false;
}
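
/* Usage sketch (illustrative): a typical caller builds the destination
 * list before handing it to tipc_mcast_xmit(), along the lines of
 *
 *	struct tipc_nlist dsts;
 *
 *	tipc_nlist_init(&dsts, tipc_own_addr(net));
 *	tipc_nlist_add(&dsts, node1);	// remote++, counted once per node
 *	tipc_nlist_add(&dsts, node2);
 *	...
 *	tipc_nlist_purge(&dsts);	// release list entries when done
 *
 * Adding the node's own address only sets 'local'; it never appears in
 * the remote list.
 */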

u32 tipc_bcast_get_mode(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	if (bb->force_bcast)
		return BCLINK_MODE_BCAST;

	if (bb->force_rcast)
		return BCLINK_MODE_RCAST;

	if (bb->bcast_support && bb->rcast_support)
		return BCLINK_MODE_SEL;

	return 0;
}

u32 tipc_bcast_get_broadcast_ratio(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	return bb->rc_ratio;
}

void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
			   struct sk_buff_head *inputq)
{
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr, *_hdr;
	bool match = false;
	u32 node, port;

	skb = skb_peek(inputq);
	if (!skb)
		return;

	hdr = buf_msg(skb);

	if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
		return;

	node = msg_orignode(hdr);
	if (node == tipc_own_addr(net))
		return;

	port = msg_origport(hdr);

	/* Has the twin SYN message already arrived? */
	skb_queue_walk(defq, _skb) {
		_hdr = buf_msg(_skb);
		if (msg_orignode(_hdr) != node)
			continue;
		if (msg_origport(_hdr) != port)
			continue;
		match = true;
		break;
	}

	if (!match) {
		if (!msg_is_syn(hdr))
			return;
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Deliver a non-SYN message from the other method, otherwise queue it */
	if (!msg_is_syn(hdr)) {
		if (msg_is_rcast(hdr) != msg_is_rcast(_hdr))
			return;
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Queue a SYN message arriving via the same method as the parked one */
	if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) {
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Matching SYN messages => return the one with data, if any */
	__skb_unlink(_skb, defq);
	if (msg_data_sz(hdr)) {
		kfree_skb(_skb);
	} else {
		__skb_dequeue(inputq);
		kfree_skb(skb);
		__skb_queue_tail(inputq, _skb);
	}

	/* Deliver subsequent non-SYN messages from same peer */
	skb_queue_walk_safe(defq, _skb, tmp) {
		_hdr = buf_msg(_skb);
		if (msg_orignode(_hdr) != node)
			continue;
		if (msg_origport(_hdr) != port)
			continue;
		if (msg_is_syn(_hdr))
			break;
		__skb_unlink(_skb, defq);
		__skb_queue_tail(inputq, _skb);
	}
}
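
/* Walkthrough of the deferred-queue logic above (illustrative): suppose
 * a peer switches from replicast to broadcast. The first broadcast
 * message carries the SYN bit and, arriving ahead of its replicast
 * twin, is parked on defq; later broadcast messages from the same
 * socket queue up behind it, while replicast stragglers on the old
 * method are still delivered. When the twin replicast SYN (the dummy)
 * arrives, the pair is resolved in favor of the message carrying data,
 * and the deferred non-SYN messages are released to inputq in order.
 */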