// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}
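
/* Update the per-CPU RX counters. Packet/byte/mcast counts are bumped
 * inside the u64_stats seqcount so 64-bit counters read consistently on
 * 32-bit hosts; errors use a lockless per-CPU increment instead.
 */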
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);
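
/* Addresses are hashed into IPVLAN_HASH_MASK-sized buckets, keyed with
 * a boot-time random secret so the bucket distribution is not
 * predictable from the addresses alone.
 */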
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}
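
/* Walk the per-port hash chain for a matching address; the packet
 * paths that call this already run under RCU protection.
 */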
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
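
/* Validate the packet headers with pskb_may_pull() and return a pointer
 * to the L3 header (or to the ICMPv6 header for IPv6 neighbour
 * solicitations), setting *type to the matching IPVL_* address type.
 * Returns NULL for malformed or unsupported packets.
 */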
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}
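
/* Worker that drains the port backlog and delivers each queued
 * multicast/broadcast frame to every interested slave via a clone.
 * The original skb is then either re-transmitted on the master (TX
 * path) or freed (RX path), and the device reference taken at enqueue
 * time is dropped.
 */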
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
		cond_resched();
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}
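
/* Deliver an skb to the slave that owns the matching address. Local
 * (slave-to-slave) packets are looped back via dev_forward_skb();
 * packets from the wire just have skb->dev rewritten and are re-queued
 * for another pass through the stack via RX_HANDLER_ANOTHER.
 */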
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}
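
/* Pick the relevant address out of the L3 header (destination or
 * source, depending on use_dest) and look it up in the port's hash
 * table. ARP and ICMPv6 neighbour solicitations carry the address in
 * the payload, so those cases parse it out of the message body.
 */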
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the Neighbour Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}
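
/* In L3/L3S mode, egress packets are routed afresh in the namespace of
 * the master device: build a flow key from the IP header, look up a
 * route, attach it and hand the skb to the local output path. The
 * same pattern is used for IPv4 below and IPv6 further down.
 */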
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic.
		 */
		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}
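
/* Defer multicast/broadcast delivery to the per-port work queue. The
 * backlog is capped at IPVLAN_QBACKLOG_LIMIT so a flood cannot pin
 * unbounded memory; a reference on skb->dev is held until the worker
 * drops it in ipvlan_process_multicast().
 */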
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; deciding this later from the mac-addresses on the packet
	 * would lead to erroneous decisions. (This would be true for a
	 * loopback-mode on the master device or a hair-pin mode of the
	 * switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}
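
/* TX in L3 mode: unless the port is in VEPA mode, a destination that
 * belongs to another slave on the same port is delivered locally
 * (private mode drops it instead); everything else is scrubbed across
 * the namespace boundary and routed out via the master device.
 */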
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			return ipvlan_rcv_frame(addr, &skb, true);
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				return ipvlan_rcv_frame(addr, &skb, true);
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}
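
/* Transmit entry point, reached from the slave's start-xmit path.
 * Dispatches to the mode-specific xmit routine; L3S transmits exactly
 * like L3 since the two modes differ only on the receive side.
 */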
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
		  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
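
/* A frame is "external" unless its source MAC is the master's own and
 * its source IP belongs to one of the slaves, i.e. unless it was
 * generated by an ipvlan device on this port.
 */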
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Perform like l3 mode for non-multicast packet */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}
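
/* Receive entry point: registered as the rx-handler on the master
 * device, so every frame the master receives lands here first. In L3S
 * mode the frame is simply passed up; delivery to the right slave
 * happens later from the L3S input hook.
 */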
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
		  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}