routing.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "routing.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <uapi/linux/batadv_packet.h>

#include "bitarray.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "send.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"
#include "tvlv.h"

static int batadv_route_unicast_packet(struct sk_buff *skb,
				       struct batadv_hard_iface *recv_if);

/**
 * _batadv_update_route() - set the router for this originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be configured
 * @recv_if: the receive interface for which this route is set
 * @neigh_node: neighbor which should be the next router
 *
 * This function does not perform any error checks
 */
static void _batadv_update_route(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 struct batadv_hard_iface *recv_if,
				 struct batadv_neigh_node *neigh_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *curr_router;

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
	if (!orig_ifinfo)
		return;

	spin_lock_bh(&orig_node->neigh_list_lock);
	/* curr_router used earlier may not be the current orig_ifinfo->router
	 * anymore because it was dereferenced outside of the neigh_list_lock
	 * protected region. After the new best neighbor has replaced the
	 * current best neighbor the reference counter needs to decrease.
	 * Consequently, the code needs to ensure the curr_router variable
	 * contains a pointer to the replaced best neighbor.
	 */
	/* increase refcount of new best neighbor */
	if (neigh_node)
		kref_get(&neigh_node->refcount);

	curr_router = rcu_replace_pointer(orig_ifinfo->router, neigh_node,
					  true);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	batadv_orig_ifinfo_put(orig_ifinfo);

	/* route deleted */
	if (curr_router && !neigh_node) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Deleting route towards: %pM\n", orig_node->orig);
		batadv_tt_global_del_orig(bat_priv, orig_node, -1,
					  "Deleted route towards originator");

	/* route added */
	} else if (!curr_router && neigh_node) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Adding route towards: %pM (via %pM)\n",
			   orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Changing route towards: %pM (now via %pM - was via %pM)\n",
			   orig_node->orig, neigh_node->addr,
			   curr_router->addr);
	}

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		batadv_neigh_node_put(curr_router);
}

/**
 * batadv_update_route() - set the router for this originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be configured
 * @recv_if: the receive interface for which this route is set
 * @neigh_node: neighbor which should be the next router
 */
void batadv_update_route(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_neigh_node *neigh_node)
{
	struct batadv_neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = batadv_orig_router_get(orig_node, recv_if);

	if (router != neigh_node)
		_batadv_update_route(bat_priv, orig_node, recv_if, neigh_node);

out:
	if (router)
		batadv_neigh_node_put(router);
}

/**
 * batadv_window_protected() - checks whether the host restarted and is in the
 *  protection time.
 * @bat_priv: the bat priv with all the soft interface information
 * @seq_num_diff: difference between the current/received sequence number and
 *  the last sequence number
 * @seq_old_max_diff: maximum age of sequence number not considered as restart
 * @last_reset: jiffies timestamp of the last reset, will be updated when reset
 *  is detected
 * @protection_started: is set to true if the protection window was started,
 *  doesn't change otherwise.
 *
 * Return:
 *  false if the packet is to be accepted.
 *  true if the packet is to be ignored.
 */
bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
			     s32 seq_old_max_diff, unsigned long *last_reset,
			     bool *protection_started)
{
	if (seq_num_diff <= -seq_old_max_diff ||
	    seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
		if (!batadv_has_timed_out(*last_reset,
					  BATADV_RESET_PROTECTION_MS))
			return true;

		*last_reset = jiffies;
		if (protection_started)
			*protection_started = true;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "old packet received, start protection\n");
	}

	return false;
}
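
/* Minimal usage sketch (for illustration only), mirroring how
 * batadv_recv_bcast_packet() further below applies the protection window to
 * broadcast sequence numbers: the caller computes the signed difference
 * between the received and the last accepted sequence number and discards the
 * packet while the protection window is active.
 *
 *	seq_diff = seqno - orig_node->last_bcast_seqno;
 *	if (batadv_window_protected(bat_priv, seq_diff, BATADV_BCAST_MAX_AGE,
 *				    &orig_node->bcast_seqno_reset, NULL))
 *		goto spin_unlock;
 */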
/**
 * batadv_check_management_packet() - Check preconditions for management packets
 * @skb: incoming packet buffer
 * @hard_iface: incoming hard interface
 * @header_len: minimal header length of packet type
 *
 * Return: true when management preconditions are met, false otherwise
 */
bool batadv_check_management_packet(struct sk_buff *skb,
				    struct batadv_hard_iface *hard_iface,
				    int header_len)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, header_len)))
		return false;

	ethhdr = eth_hdr(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return false;

	/* packet with invalid sender address */
	if (!is_valid_ether_addr(ethhdr->h_source))
		return false;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return false;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return false;

	return true;
}

/**
 * batadv_recv_my_icmp_packet() - receive an icmp packet locally
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: icmp packet to process
 *
 * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
 * otherwise.
 */
static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
				      struct sk_buff *skb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_icmp_header *icmph;
	int res, ret = NET_RX_DROP;

	icmph = (struct batadv_icmp_header *)skb->data;

	switch (icmph->msg_type) {
	case BATADV_ECHO_REPLY:
	case BATADV_DESTINATION_UNREACHABLE:
	case BATADV_TTL_EXCEEDED:
		/* receive the packet */
		if (skb_linearize(skb) < 0)
			break;

		batadv_socket_receive_packet(icmph, skb->len);
		break;
	case BATADV_ECHO_REQUEST:
		/* answer echo request (ping) */
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;

		/* get routing information */
		orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
		if (!orig_node)
			goto out;

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, ETH_HLEN) < 0)
			goto out;

		icmph = (struct batadv_icmp_header *)skb->data;

		ether_addr_copy(icmph->dst, icmph->orig);
		ether_addr_copy(icmph->orig, primary_if->net_dev->dev_addr);
		icmph->msg_type = BATADV_ECHO_REPLY;
		icmph->ttl = BATADV_TTL;

		res = batadv_send_skb_to_orig(skb, orig_node, NULL);
		if (res == NET_XMIT_SUCCESS)
			ret = NET_RX_SUCCESS;

		/* skb was consumed */
		skb = NULL;
		break;
	case BATADV_TP:
		if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
			goto out;

		batadv_tp_meter_recv(bat_priv, skb);
		ret = NET_RX_SUCCESS;
		/* skb was consumed */
		skb = NULL;
		goto out;
	default:
		/* drop unknown type */
		goto out;
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (orig_node)
		batadv_orig_node_put(orig_node);

	kfree_skb(skb);

	return ret;
}

static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
					 struct sk_buff *skb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_icmp_packet *icmp_packet;
	int res, ret = NET_RX_DROP;

	icmp_packet = (struct batadv_icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
			 icmp_packet->orig, icmp_packet->dst);
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct batadv_icmp_packet *)skb->data;

	ether_addr_copy(icmp_packet->dst, icmp_packet->orig);
	ether_addr_copy(icmp_packet->orig, primary_if->net_dev->dev_addr);
	icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
	icmp_packet->ttl = BATADV_TTL;

	res = batadv_send_skb_to_orig(skb, orig_node, NULL);
	if (res == NET_XMIT_SUCCESS)
		ret = NET_RX_SUCCESS;

	/* skb was consumed */
	skb = NULL;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (orig_node)
		batadv_orig_node_put(orig_node);

	kfree_skb(skb);

	return ret;
}

/**
 * batadv_recv_icmp_packet() - Process incoming icmp packet
 * @skb: incoming packet buffer
 * @recv_if: incoming hard interface
 *
 * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
 */
int batadv_recv_icmp_packet(struct sk_buff *skb,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_icmp_header *icmph;
	struct batadv_icmp_packet_rr *icmp_packet_rr;
	struct ethhdr *ethhdr;
	struct batadv_orig_node *orig_node = NULL;
	int hdr_size = sizeof(struct batadv_icmp_header);
	int res, ret = NET_RX_DROP;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto free_skb;

	ethhdr = eth_hdr(skb);

	/* packet with unicast indication but non-unicast recipient */
	if (!is_valid_ether_addr(ethhdr->h_dest))
		goto free_skb;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		goto free_skb;

	/* not for me */
	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
		goto free_skb;

	icmph = (struct batadv_icmp_header *)skb->data;

	/* add record route information if not full */
	if ((icmph->msg_type == BATADV_ECHO_REPLY ||
	     icmph->msg_type == BATADV_ECHO_REQUEST) &&
	    skb->len >= sizeof(struct batadv_icmp_packet_rr)) {
		if (skb_linearize(skb) < 0)
			goto free_skb;

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, ETH_HLEN) < 0)
			goto free_skb;

		ethhdr = eth_hdr(skb);
		icmph = (struct batadv_icmp_header *)skb->data;
		icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
		if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
			goto free_skb;

		ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
				ethhdr->h_dest);
		icmp_packet_rr->rr_cur++;
	}

	/* packet for me */
	if (batadv_is_my_mac(bat_priv, icmph->dst))
		return batadv_recv_my_icmp_packet(bat_priv, skb);

	/* TTL exceeded */
	if (icmph->ttl < 2)
		return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
	if (!orig_node)
		goto free_skb;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto put_orig_node;

	icmph = (struct batadv_icmp_header *)skb->data;

	/* decrement ttl */
	icmph->ttl--;

	/* route it */
	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
	if (res == NET_XMIT_SUCCESS)
		ret = NET_RX_SUCCESS;

	/* skb was consumed */
	skb = NULL;

put_orig_node:
	if (orig_node)
		batadv_orig_node_put(orig_node);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_check_unicast_packet() - Check for malformed unicast packets
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: packet to check
 * @hdr_size: size of header to pull
 *
 * Checks for short header and bad addresses in the given packet.
 *
 * Return: negative value when check fails and 0 otherwise. The negative value
 * depends on the reason: -ENODATA for bad header, -EBADR for broadcast
 * destination or source, and -EREMOTE for non-local (other host) destination.
 */
static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
				       struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -ENODATA;

	ethhdr = eth_hdr(skb);

	/* packet with unicast indication but non-unicast recipient */
	if (!is_valid_ether_addr(ethhdr->h_dest))
		return -EBADR;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		return -EBADR;

	/* not for me */
	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
		return -EREMOTE;

	return 0;
}
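
/* Usage sketch (for illustration only): callers treat any negative return
 * value as "drop", but may look at the specific code first. For instance,
 * batadv_recv_unicast_packet() below uses -EREMOTE (a promiscuously received
 * packet that is not addressed to this node) as a hint to hand the frame to
 * the network coding code before dropping it:
 *
 *	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
 *	if (check == -EREMOTE)
 *		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
 *	if (check < 0)
 *		goto free_skb;
 */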
/**
 * batadv_last_bonding_get() - Get last_bonding_candidate of orig_node
 * @orig_node: originator node whose last bonding candidate should be retrieved
 *
 * Return: last bonding candidate of router or NULL if not found
 *
 * The object is returned with refcounter increased by 1.
 */
static struct batadv_orig_ifinfo *
batadv_last_bonding_get(struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *last_bonding_candidate;

	spin_lock_bh(&orig_node->neigh_list_lock);
	last_bonding_candidate = orig_node->last_bonding_candidate;

	if (last_bonding_candidate)
		kref_get(&last_bonding_candidate->refcount);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	return last_bonding_candidate;
}

/**
 * batadv_last_bonding_replace() - Replace last_bonding_candidate of orig_node
 * @orig_node: originator node whose bonding candidates should be replaced
 * @new_candidate: new bonding candidate or NULL
 */
static void
batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
			    struct batadv_orig_ifinfo *new_candidate)
{
	struct batadv_orig_ifinfo *old_candidate;

	spin_lock_bh(&orig_node->neigh_list_lock);
	old_candidate = orig_node->last_bonding_candidate;

	if (new_candidate)
		kref_get(&new_candidate->refcount);
	orig_node->last_bonding_candidate = new_candidate;
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (old_candidate)
		batadv_orig_ifinfo_put(old_candidate);
}

/**
 * batadv_find_router() - find a suitable router for this originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the destination node
 * @recv_if: pointer to interface this packet was received on
 *
 * Return: the router which should be used for this orig_node on
 * this interface, or NULL if not available.
 */
struct batadv_neigh_node *
batadv_find_router(struct batadv_priv *bat_priv,
		   struct batadv_orig_node *orig_node,
		   struct batadv_hard_iface *recv_if)
{
	struct batadv_algo_ops *bao = bat_priv->algo_ops;
	struct batadv_neigh_node *first_candidate_router = NULL;
	struct batadv_neigh_node *next_candidate_router = NULL;
	struct batadv_neigh_node *router, *cand_router = NULL;
	struct batadv_neigh_node *last_cand_router = NULL;
	struct batadv_orig_ifinfo *cand, *first_candidate = NULL;
	struct batadv_orig_ifinfo *next_candidate = NULL;
	struct batadv_orig_ifinfo *last_candidate;
	bool last_candidate_found = false;

	if (!orig_node)
		return NULL;

	router = batadv_orig_router_get(orig_node, recv_if);

	if (!router)
		return router;

	/* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
	 * and if activated.
	 */
	if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
		return router;

	/* bonding: loop through the list of possible routers found
	 * for the various outgoing interfaces and find a candidate after
	 * the last chosen bonding candidate (next_candidate). If no such
	 * router is found, use the first candidate found (the previously
	 * chosen bonding candidate might have been the last one in the list).
	 * If this can't be found either, return the previously chosen
	 * router - obviously there are no other candidates.
	 */
	rcu_read_lock();
	last_candidate = batadv_last_bonding_get(orig_node);
	if (last_candidate)
		last_cand_router = rcu_dereference(last_candidate->router);

	hlist_for_each_entry_rcu(cand, &orig_node->ifinfo_list, list) {
		/* acquire some structures and references ... */
		if (!kref_get_unless_zero(&cand->refcount))
			continue;

		cand_router = rcu_dereference(cand->router);
		if (!cand_router)
			goto next;

		if (!kref_get_unless_zero(&cand_router->refcount)) {
			cand_router = NULL;
			goto next;
		}

		/* alternative candidate should be good enough to be
		 * considered
		 */
		if (!bao->neigh.is_similar_or_better(cand_router,
						     cand->if_outgoing, router,
						     recv_if))
			goto next;

		/* don't use the same router twice */
		if (last_cand_router == cand_router)
			goto next;

		/* mark the first possible candidate */
		if (!first_candidate) {
			kref_get(&cand_router->refcount);
			kref_get(&cand->refcount);
			first_candidate = cand;
			first_candidate_router = cand_router;
		}

		/* check if the loop has already passed the previously selected
		 * candidate ... this function should select the next candidate
		 * AFTER the previously used bonding candidate.
		 */
		if (!last_candidate || last_candidate_found) {
			next_candidate = cand;
			next_candidate_router = cand_router;
			break;
		}

		if (last_candidate == cand)
			last_candidate_found = true;
next:
		/* free references */
		if (cand_router) {
			batadv_neigh_node_put(cand_router);
			cand_router = NULL;
		}

		batadv_orig_ifinfo_put(cand);
	}
	rcu_read_unlock();

	/* After finding candidates, handle the three cases:
	 * 1) there is a next candidate, use that
	 * 2) there is no next candidate, use the first of the list
	 * 3) there is no candidate at all, return the default router
	 */
	if (next_candidate) {
		batadv_neigh_node_put(router);

		kref_get(&next_candidate_router->refcount);
		router = next_candidate_router;
		batadv_last_bonding_replace(orig_node, next_candidate);
	} else if (first_candidate) {
		batadv_neigh_node_put(router);

		kref_get(&first_candidate_router->refcount);
		router = first_candidate_router;
		batadv_last_bonding_replace(orig_node, first_candidate);
	} else {
		batadv_last_bonding_replace(orig_node, NULL);
	}

	/* cleanup of candidates */
	if (first_candidate) {
		batadv_neigh_node_put(first_candidate_router);
		batadv_orig_ifinfo_put(first_candidate);
	}

	if (next_candidate) {
		batadv_neigh_node_put(next_candidate_router);
		batadv_orig_ifinfo_put(next_candidate);
	}

	if (last_candidate)
		batadv_orig_ifinfo_put(last_candidate);

	return router;
}

static int batadv_route_unicast_packet(struct sk_buff *skb,
				       struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = eth_hdr(skb);
	int res, hdr_len, ret = NET_RX_DROP;
	unsigned int len;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto free_skb;
	}

	/* get routing information */
	orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto free_skb;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto put_orig_node;

	/* decrement ttl */
	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->ttl--;

	switch (unicast_packet->packet_type) {
	case BATADV_UNICAST_4ADDR:
		hdr_len = sizeof(struct batadv_unicast_4addr_packet);
		break;
	case BATADV_UNICAST:
		hdr_len = sizeof(struct batadv_unicast_packet);
		break;
	default:
		/* other packet types not supported - yet */
		hdr_len = -1;
		break;
	}

	if (hdr_len > 0)
		batadv_skb_set_priority(skb, hdr_len);

	len = skb->len;
	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);

	/* translate transmit result into receive result */
	if (res == NET_XMIT_SUCCESS) {
		ret = NET_RX_SUCCESS;

		/* skb was transmitted and consumed */
		batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
		batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
				   len + ETH_HLEN);
	}

	/* skb was consumed */
	skb = NULL;

put_orig_node:
	batadv_orig_node_put(orig_node);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_reroute_unicast_packet() - update the unicast header for re-routing
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: unicast packet to process
 * @unicast_packet: the unicast header to be updated
 * @dst_addr: the payload destination
 * @vid: VLAN identifier
 *
 * Search the translation table for dst_addr and update the unicast header with
 * the new corresponding information (originator address where the destination
 * client currently is and its known TTVN)
 *
 * Return: true if the packet header has been updated, false otherwise
 */
static bool
batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
			      struct batadv_unicast_packet *unicast_packet,
			      u8 *dst_addr, unsigned short vid)
{
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	bool ret = false;
	u8 *orig_addr, orig_ttvn;

	if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;
		orig_addr = primary_if->net_dev->dev_addr;
		orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
	} else {
		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
						     vid);
		if (!orig_node)
			goto out;

		if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
			goto out;

		orig_addr = orig_node->orig;
		orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
	}

	/* update the packet header */
	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
	ether_addr_copy(unicast_packet->dest, orig_addr);
	unicast_packet->ttvn = orig_ttvn;
	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}

static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
				      struct sk_buff *skb, int hdr_len)
{
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	u8 curr_ttvn, old_ttvn;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int is_old_ttvn;

	/* check if there is enough data before accessing it */
	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
		return false;
	/* create a copy of the skb (in case of re-routing) to modify it. */
	if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	vid = batadv_get_vid(skb, hdr_len);
	ethhdr = (struct ethhdr *)(skb->data + hdr_len);

	/* do not reroute multicast frames in a unicast header */
	if (is_multicast_ether_addr(ethhdr->h_dest))
		return true;

	/* check if the destination client was served by this node and it is now
	 * roaming. In this case, it means that the node has got a ROAM_ADV
	 * message and that it knows the new destination in the mesh to re-route
	 * the packet to
	 */
	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
		if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
						  ethhdr->h_dest, vid))
			batadv_dbg_ratelimited(BATADV_DBG_TT,
					       bat_priv,
					       "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
					       unicast_packet->dest,
					       ethhdr->h_dest);
		/* at this point the mesh destination should have been
		 * substituted with the originator address found in the global
		 * table. If not, let the packet go untouched anyway because
		 * there is nothing the node can do
		 */
		return true;
	}

	/* retrieve the TTVN known by this node for the packet destination. This
	 * value is used later to check if the node which sent (or re-routed
	 * last time) the packet had an updated information or not
	 */
	curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		orig_node = batadv_orig_hash_find(bat_priv,
						  unicast_packet->dest);
		/* if it is not possible to find the orig_node representing the
		 * destination, the packet can immediately be dropped as it will
		 * not be possible to deliver it
		 */
		if (!orig_node)
			return false;

		curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
		batadv_orig_node_put(orig_node);
	}

	/* check if the TTVN contained in the packet is fresher than what the
	 * node knows
	 */
	is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
	if (!is_old_ttvn)
		return true;

	old_ttvn = unicast_packet->ttvn;
	/* the packet was forged based on outdated network information. Its
	 * destination can possibly be updated and forwarded towards the new
	 * target host
	 */
	if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
					  ethhdr->h_dest, vid)) {
		batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
				       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
				       unicast_packet->dest, ethhdr->h_dest,
				       old_ttvn, curr_ttvn);
		return true;
	}

	/* the packet has not been re-routed: either the destination is
	 * currently served by this node or there is no destination at all and
	 * it is possible to drop the packet
	 */
	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
		return false;

	/* update the header in order to let the packet be delivered to this
	 * node's soft interface
	 */
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return false;

	/* update the packet header */
	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
	ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
	unicast_packet->ttvn = curr_ttvn;
	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));

	batadv_hardif_put(primary_if);

	return true;
}

/**
 * batadv_recv_unhandled_unicast_packet() - receive and process packets which
 *  are in the unicast number space but not yet known to the implementation
 * @skb: unicast tvlv packet to process
 * @recv_if: pointer to interface this packet was received on
 *
 * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
 * otherwise.
 */
int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
					 struct batadv_hard_iface *recv_if)
{
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int check, hdr_size = sizeof(*unicast_packet);

	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
	if (check < 0)
		goto free_skb;

	/* we don't know about this type, drop it. */
	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
		goto free_skb;

	return batadv_route_unicast_packet(skb, recv_if);

free_skb:
	kfree_skb(skb);

	return NET_RX_DROP;
}

/**
 * batadv_recv_unicast_packet() - Process incoming unicast packet
 * @skb: incoming packet buffer
 * @recv_if: incoming hard interface
 *
 * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
 */
int batadv_recv_unicast_packet(struct sk_buff *skb,
			       struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
	u8 *orig_addr, *orig_addr_gw;
	struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
	int check, hdr_size = sizeof(*unicast_packet);
	enum batadv_subtype subtype;
	int ret = NET_RX_DROP;
	bool is4addr, is_gw;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
	/* the caller function should have already pulled 2 bytes */
	if (is4addr)
		hdr_size = sizeof(*unicast_4addr_packet);

	/* function returns -EREMOTE for promiscuous packets */
	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);

	/* Even though the packet is not for us, we might save it to use for
	 * decoding a later received coded packet
	 */
	if (check == -EREMOTE)
		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);

	if (check < 0)
		goto free_skb;
	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
		goto free_skb;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* packet for me */
	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		/* If this is a unicast packet from another backbone gw,
		 * drop it.
		 */
		orig_addr_gw = eth_hdr(skb)->h_source;
		orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
		if (orig_node_gw) {
			is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
							  hdr_size);
			batadv_orig_node_put(orig_node_gw);
			if (is_gw) {
				batadv_dbg(BATADV_DBG_BLA, bat_priv,
					   "%s(): Dropped unicast pkt received from another backbone gw %pM.\n",
					   __func__, orig_addr_gw);
				goto free_skb;
			}
		}

		if (is4addr) {
			unicast_4addr_packet =
				(struct batadv_unicast_4addr_packet *)skb->data;
			subtype = unicast_4addr_packet->subtype;
			batadv_dat_inc_counter(bat_priv, subtype);

			/* Only payload data should be considered for speedy
			 * join. For example, DAT also uses unicast 4addr
			 * types, but those packets should not be considered
			 * for speedy join, since the clients do not actually
			 * reside at the sending originator.
			 */
			if (subtype == BATADV_P_DATA) {
				orig_addr = unicast_4addr_packet->src;
				orig_node = batadv_orig_hash_find(bat_priv,
								  orig_addr);
			}
		}

		if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
							  hdr_size))
			goto rx_success;
		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
							hdr_size))
			goto rx_success;
		batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);

		batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
				    orig_node);

rx_success:
		if (orig_node)
			batadv_orig_node_put(orig_node);

		return NET_RX_SUCCESS;
	}

	ret = batadv_route_unicast_packet(skb, recv_if);
	/* skb was consumed */
	skb = NULL;

free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_recv_unicast_tvlv() - receive and process unicast tvlv packets
 * @skb: unicast tvlv packet to process
 * @recv_if: pointer to interface this packet was received on
 *
 * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
 * otherwise.
 */
int batadv_recv_unicast_tvlv(struct sk_buff *skb,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	unsigned char *tvlv_buff;
	u16 tvlv_buff_len;
	int hdr_size = sizeof(*unicast_tvlv_packet);
	int ret = NET_RX_DROP;

	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
		goto free_skb;

	/* the header is likely to be modified while forwarding */
	if (skb_cow(skb, hdr_size) < 0)
		goto free_skb;

	/* packet needs to be linearized to access the tvlv content */
	if (skb_linearize(skb) < 0)
		goto free_skb;

	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;

	tvlv_buff = (unsigned char *)(skb->data + hdr_size);
	tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);

	if (tvlv_buff_len > skb->len - hdr_size)
		goto free_skb;

	ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
					     unicast_tvlv_packet->src,
					     unicast_tvlv_packet->dst,
					     tvlv_buff, tvlv_buff_len);

	if (ret != NET_RX_SUCCESS) {
		ret = batadv_route_unicast_packet(skb, recv_if);
		/* skb was consumed */
		skb = NULL;
	}

free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_recv_frag_packet() - process received fragment
 * @skb: the received fragment
 * @recv_if: interface that the skb is received on
 *
 * This function does one of the three following things: 1) Forward fragment, if
 * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still
 * lack further fragments; 3) Merge fragments, if we have all needed parts.
 *
 * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
 */
int batadv_recv_frag_packet(struct sk_buff *skb,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_src = NULL;
	struct batadv_frag_packet *frag_packet;
	int ret = NET_RX_DROP;

	if (batadv_check_unicast_packet(bat_priv, skb,
					sizeof(*frag_packet)) < 0)
		goto free_skb;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
	if (!orig_node_src)
		goto free_skb;

	skb->priority = frag_packet->priority + 256;

	/* Route the fragment if it is not for us and too big to be merged. */
	if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
	    batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
		/* skb was consumed */
		skb = NULL;
		ret = NET_RX_SUCCESS;
		goto put_orig_node;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);

	/* Add fragment to buffer and merge if possible. */
	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
		goto put_orig_node;

	/* Deliver merged packet to the appropriate handler, if it was
	 * merged
	 */
	if (skb) {
		batadv_batman_skb_recv(skb, recv_if->net_dev,
				       &recv_if->batman_adv_ptype, NULL);
		/* skb was consumed */
		skb = NULL;
	}

	ret = NET_RX_SUCCESS;

put_orig_node:
	batadv_orig_node_put(orig_node_src);
free_skb:
	kfree_skb(skb);

	return ret;
}

/**
 * batadv_recv_bcast_packet() - Process incoming broadcast packet
 * @skb: incoming packet buffer
 * @recv_if: incoming hard interface
 *
 * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
 */
int batadv_recv_bcast_packet(struct sk_buff *skb,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	s32 seq_diff;
	u32 seqno;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto free_skb;

	ethhdr = eth_hdr(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto free_skb;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		goto free_skb;

	/* ignore broadcasts sent by myself */
	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
		goto free_skb;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
		goto free_skb;

	if (bcast_packet->ttl < 2)
		goto free_skb;

	orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto free_skb;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	seqno = ntohl(bcast_packet->seqno);
	/* check whether the packet is a duplicate */
	if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			    seqno))
		goto spin_unlock;

	seq_diff = seqno - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (batadv_window_protected(bat_priv, seq_diff,
				    BATADV_BCAST_MAX_AGE,
				    &orig_node->bcast_seqno_reset, NULL))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required.
	 */
	if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = seqno;

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* check whether this has been sent by another originator before */
	if (batadv_bla_check_bcast_duplist(bat_priv, skb))
		goto free_skb;

	batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));

	/* rebroadcast packet */
	batadv_add_bcast_packet_to_list(bat_priv, skb, 1, false);

	/* don't hand the broadcast up if it is from an originator
	 * from the same backbone.
	 */
	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto free_skb;

	if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
		goto rx_success;
	if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
		goto rx_success;

	batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);

	/* broadcast for me */
	batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);

rx_success:
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
free_skb:
	kfree_skb(skb);
out:
	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}