// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "device.h"
#include "peer.h"
#include "timers.h"
#include "messages.h"
#include "cookie.h"
#include "socket.h"

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>

/* Must be called with bh disabled. */
static void update_rx_stats(struct wg_peer *peer, size_t len)
{
	struct pcpu_sw_netstats *tstats =
		get_cpu_ptr(peer->device->dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	++tstats->rx_packets;
	tstats->rx_bytes += len;
	peer->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
	put_cpu_ptr(tstats);
}

#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
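
/* Returns the header length expected for the message type this packet claims
 * to be, or 0 if the claimed type is unknown or the skb is too short to
 * plausibly contain a message of that type.
 */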
static size_t validate_header_len(struct sk_buff *skb)
{
	if (unlikely(skb->len < sizeof(struct message_header)))
		return 0;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
	    skb->len >= MESSAGE_MINIMUM_LENGTH)
		return sizeof(struct message_data);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
	    skb->len == sizeof(struct message_handshake_initiation))
		return sizeof(struct message_handshake_initiation);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
	    skb->len == sizeof(struct message_handshake_response))
		return sizeof(struct message_handshake_response);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
	    skb->len == sizeof(struct message_handshake_cookie))
		return sizeof(struct message_handshake_cookie);
	return 0;
}
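
/* Validates the outer IP and UDP headers, trims the skb to the length the
 * UDP header reports, and pulls the skb so that skb->data points at the
 * WireGuard message header, with the full header for the claimed message
 * type linearized. Returns 0 on success or -EINVAL for anything malformed.
 */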
static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
{
	size_t data_offset, data_len, header_len;
	struct udphdr *udp;

	if (unlikely(!wg_check_packet_protocol(skb) ||
		     skb_transport_header(skb) < skb->head ||
		     (skb_transport_header(skb) + sizeof(struct udphdr)) >
			     skb_tail_pointer(skb)))
		return -EINVAL; /* Bogus IP header */
	udp = udp_hdr(skb);
	data_offset = (u8 *)udp - skb->data;
	if (unlikely(data_offset > U16_MAX ||
		     data_offset + sizeof(struct udphdr) > skb->len))
		/* Packet has offset at impossible location or isn't big enough
		 * to have UDP fields.
		 */
		return -EINVAL;
	data_len = ntohs(udp->len);
	if (unlikely(data_len < sizeof(struct udphdr) ||
		     data_len > skb->len - data_offset))
		/* UDP packet is reporting too small of a size or lying about
		 * its size.
		 */
		return -EINVAL;
	data_len -= sizeof(struct udphdr);
	data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
	if (unlikely(!pskb_may_pull(skb,
				data_offset + sizeof(struct message_header)) ||
		     pskb_trim(skb, data_len + data_offset) < 0))
		return -EINVAL;
	skb_pull(skb, data_offset);
	if (unlikely(skb->len != data_len))
		/* Final len does not agree with calculated len */
		return -EINVAL;
	header_len = validate_header_len(skb);
	if (unlikely(!header_len))
		return -EINVAL;
	__skb_push(skb, data_offset);
	if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
		return -EINVAL;
	__skb_pull(skb, data_offset);
	return 0;
}
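
/* Consumes one handshake-family message: initiation, response, or cookie
 * reply. When the device is under load, initiations and responses whose MACs
 * lack a valid cookie are answered with a cookie reply instead of being
 * processed, which rate-limits the expensive curve operations under flood.
 */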
static void wg_receive_handshake_packet(struct wg_device *wg,
					struct sk_buff *skb)
{
	enum cookie_mac_state mac_state;
	struct wg_peer *peer = NULL;
	/* This is global, so that our load calculation applies to the whole
	 * system. We don't care about races with it at all.
	 */
	static u64 last_under_load;
	bool packet_needs_cookie;
	bool under_load;

	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
		net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
					wg->dev->name, skb);
		wg_cookie_message_consume(
			(struct message_handshake_cookie *)skb->data, wg);
		return;
	}

	under_load = atomic_read(&wg->handshake_queue_len) >=
			MAX_QUEUED_INCOMING_HANDSHAKES / 8;
	if (under_load) {
		last_under_load = ktime_get_coarse_boottime_ns();
	} else if (last_under_load) {
		under_load = !wg_birthdate_has_expired(last_under_load, 1);
		if (!under_load)
			last_under_load = 0;
	}
	mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
					      under_load);
	if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
	    (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
		packet_needs_cookie = false;
	} else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
		packet_needs_cookie = true;
	} else {
		net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
					wg->dev->name, skb);
		return;
	}

	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
		struct message_handshake_initiation *message =
			(struct message_handshake_initiation *)skb->data;

		if (packet_needs_cookie) {
			wg_packet_send_handshake_cookie(wg, skb,
							message->sender_index);
			return;
		}
		peer = wg_noise_handshake_consume_initiation(message, wg);
		if (unlikely(!peer)) {
			net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
						wg->dev->name, skb);
			return;
		}
		wg_socket_set_peer_endpoint_from_skb(peer, skb);
		net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
				    wg->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		wg_packet_send_handshake_response(peer);
		break;
	}
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
		struct message_handshake_response *message =
			(struct message_handshake_response *)skb->data;

		if (packet_needs_cookie) {
			wg_packet_send_handshake_cookie(wg, skb,
							message->sender_index);
			return;
		}
		peer = wg_noise_handshake_consume_response(message, wg);
		if (unlikely(!peer)) {
			net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
						wg->dev->name, skb);
			return;
		}
		wg_socket_set_peer_endpoint_from_skb(peer, skb);
		net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
				    wg->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		if (wg_noise_handshake_begin_session(&peer->handshake,
						     &peer->keypairs)) {
			wg_timers_session_derived(peer);
			wg_timers_handshake_complete(peer);
			/* Calling this function will either send any existing
			 * packets in the queue and not send a keepalive, which
			 * is the best case, or, if there's nothing in the
			 * queue, it will send a keepalive, in order to give
			 * immediate confirmation of the session.
			 */
			wg_packet_send_keepalive(peer);
		}
		break;
	}
	}

	if (unlikely(!peer)) {
		WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
		return;
	}

	local_bh_disable();
	update_rx_stats(peer, skb->len);
	local_bh_enable();

	wg_timers_any_authenticated_packet_received(peer);
	wg_timers_any_authenticated_packet_traversal(peer);
	wg_peer_put(peer);
}
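
/* Work item that drains the device-wide handshake queue, processing and
 * freeing each queued skb in turn. Runs on handshake_receive_wq.
 */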
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker,
						 work)->ptr;
	struct wg_device *wg = container_of(queue, struct wg_device,
					    handshake_queue);
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		wg_receive_handshake_packet(wg, skb);
		dev_kfree_skb(skb);
		atomic_dec(&wg->handshake_queue_len);
		cond_resched();
	}
}
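
/* Having just received an authenticated packet, opportunistically begin a
 * new handshake if we initiated the current session and it is nearing
 * REJECT_AFTER_TIME, so rekeying completes before the keypair expires. Done
 * at most once per session, via sent_lastminute_handshake.
 */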
static void keep_key_fresh(struct wg_peer *peer)
{
	struct noise_keypair *keypair;
	bool send;

	if (peer->sent_lastminute_handshake)
		return;

	rcu_read_lock_bh();
	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
	send = keypair && READ_ONCE(keypair->sending.is_valid) &&
	       keypair->i_am_the_initiator &&
	       wg_birthdate_has_expired(keypair->sending.birthdate,
			REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
	rcu_read_unlock_bh();

	if (unlikely(send)) {
		peer->sent_lastminute_handshake = true;
		wg_packet_send_queued_handshake_initiation(peer, false);
	}
}
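
/* Decrypts a data message in place with ChaCha20-Poly1305, after checking
 * that the receiving key is valid and not past its time or message limits.
 * On success the skb is left pointing at the decrypted inner packet, with
 * the network header, needed later to recover the endpoint, still intact.
 */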
static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 8];
	struct sk_buff *trailer;
	unsigned int offset;
	int num_frags;

	if (unlikely(!keypair))
		return false;

	if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
		     wg_birthdate_has_expired(keypair->receiving.birthdate,
					      REJECT_AFTER_TIME) ||
		     keypair->receiving_counter.counter >=
					      REJECT_AFTER_MESSAGES)) {
		WRITE_ONCE(keypair->receiving.is_valid, false);
		return false;
	}

	PACKET_CB(skb)->nonce =
		le64_to_cpu(((struct message_data *)skb->data)->counter);

	/* We ensure that the network header is part of the packet before we
	 * call skb_cow_data, so that there's no chance that data is removed
	 * from the skb, so that later we can extract the original endpoint.
	 */
	offset = skb->data - skb_network_header(skb);
	skb_push(skb, offset);
	num_frags = skb_cow_data(skb, 0, &trailer);
	offset += sizeof(struct message_data);
	skb_pull(skb, offset);
	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
		return false;

	sg_init_table(sg, num_frags);
	if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
		return false;

	if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
						 PACKET_CB(skb)->nonce,
						 keypair->receiving.key))
		return false;

	/* Another ugly situation of pushing and pulling the header so as to
	 * keep endpoint information intact.
	 */
	skb_push(skb, offset);
	if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
		return false;
	skb_pull(skb, offset);

	return true;
}
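
/* Replay protection, used by wg_packet_rx_poll() below: a sliding window of
 * COUNTER_BITS_TOTAL bits records which recent counters have been seen. As
 * an example, if the highest counter accepted so far is 1000, then counter
 * 990 is accepted only if its bit in the backtrack bitmap is still clear,
 * counter 1005 slides the window forward (zeroing only the words it skips
 * over), and anything at or below 1000 - COUNTER_WINDOW_SIZE - 1 is
 * rejected outright.
 */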
/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
static bool counter_validate(struct noise_replay_counter *counter,
			     u64 their_counter)
{
	unsigned long index, index_current, top, i;
	bool ret = false;

	spin_lock_bh(&counter->lock);
	if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
		     their_counter >= REJECT_AFTER_MESSAGES))
		goto out;

	++their_counter;

	if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
		     counter->counter))
		goto out;

	index = their_counter >> ilog2(BITS_PER_LONG);

	if (likely(their_counter > counter->counter)) {
		index_current = counter->counter >> ilog2(BITS_PER_LONG);
		top = min_t(unsigned long, index - index_current,
			    COUNTER_BITS_TOTAL / BITS_PER_LONG);
		for (i = 1; i <= top; ++i)
			counter->backtrack[(i + index_current) &
				((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
		counter->counter = their_counter;
	}

	index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
	ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
				&counter->backtrack[index]);

out:
	spin_unlock_bh(&counter->lock);
	return ret;
}
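
/* Compiled-in selftest for the replay counter above; the included file is
 * expected to compile to nothing unless its debug guard is enabled.
 */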
#include "selftest/counter.c"
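
/* Runs after a data packet has been decrypted and its counter validated:
 * updates the peer's endpoint and timers, sanity-checks the inner IP packet,
 * enforces cryptokey routing on the inner source address, and hands the
 * packet to the stack via napi_gro_receive(). Dishonest packets are counted
 * in the device error stats and dropped.
 */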
static void wg_packet_consume_data_done(struct wg_peer *peer,
					struct sk_buff *skb,
					struct endpoint *endpoint)
{
	struct net_device *dev = peer->device->dev;
	unsigned int len, len_before_trim;
	struct wg_peer *routed_peer;

	wg_socket_set_peer_endpoint(peer, endpoint);

	if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
						    PACKET_CB(skb)->keypair))) {
		wg_timers_handshake_complete(peer);
		wg_packet_send_staged_packets(peer);
	}

	keep_key_fresh(peer);

	wg_timers_any_authenticated_packet_received(peer);
	wg_timers_any_authenticated_packet_traversal(peer);

	/* A packet with length 0 is a keepalive packet */
	if (unlikely(!skb->len)) {
		update_rx_stats(peer, message_data_len(0));
		net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
				    dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		goto packet_processed;
	}

	wg_timers_data_received(peer);

	if (unlikely(skb_network_header(skb) < skb->head))
		goto dishonest_packet_size;
	if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
		       (ip_hdr(skb)->version == 4 ||
			(ip_hdr(skb)->version == 6 &&
			 pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
		goto dishonest_packet_type;

	skb->dev = dev;
	/* We've already verified the Poly1305 auth tag, which means this packet
	 * was not modified in transit. We can therefore tell the networking
	 * stack that all checksums of every layer of encapsulation have already
	 * been checked "by the hardware", and it is therefore unnecessary to
	 * check them again in software.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ~0; /* All levels */
	skb->protocol = ip_tunnel_parse_protocol(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		len = ntohs(ip_hdr(skb)->tot_len);
		if (unlikely(len < sizeof(struct iphdr)))
			goto dishonest_packet_size;
		INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		len = ntohs(ipv6_hdr(skb)->payload_len) +
		      sizeof(struct ipv6hdr);
		INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds,
				     ipv6_get_dsfield(ipv6_hdr(skb)));
	} else {
		goto dishonest_packet_type;
	}

	if (unlikely(len > skb->len))
		goto dishonest_packet_size;
	len_before_trim = skb->len;
	if (unlikely(pskb_trim(skb, len)))
		goto packet_processed;

	routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
					       skb);
	wg_peer_put(routed_peer); /* We don't need the extra reference. */

	if (unlikely(routed_peer != peer))
		goto dishonest_packet_peer;

	napi_gro_receive(&peer->napi, skb);
	update_rx_stats(peer, message_data_len(len_before_trim));
	return;

dishonest_packet_peer:
	net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
				dev->name, skb, peer->internal_id,
				&peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_frame_errors;
	goto packet_processed;
dishonest_packet_type:
	net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
			    dev->name, peer->internal_id, &peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_frame_errors;
	goto packet_processed;
dishonest_packet_size:
	net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
			    dev->name, peer->internal_id, &peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_length_errors;
	goto packet_processed;
packet_processed:
	dev_kfree_skb(skb);
}
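
/* Per-peer NAPI poll handler: consumes packets from the peer's rx queue in
 * their original receive order, once the decrypt workers have marked them
 * done, validates each nonce against the replay window, and completes
 * delivery. Decryption itself happens earlier, in parallel, in
 * wg_packet_decrypt_worker().
 */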
int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
	struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
	struct noise_keypair *keypair;
	struct endpoint endpoint;
	enum packet_state state;
	struct sk_buff *skb;
	int work_done = 0;
	bool free;

	if (unlikely(budget <= 0))
		return 0;

	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
			PACKET_STATE_UNCRYPTED) {
		wg_prev_queue_drop_peeked(&peer->rx_queue);
		keypair = PACKET_CB(skb)->keypair;
		free = true;

		if (unlikely(state != PACKET_STATE_CRYPTED))
			goto next;

		if (unlikely(!counter_validate(&keypair->receiving_counter,
					       PACKET_CB(skb)->nonce))) {
			net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
					    peer->device->dev->name,
					    PACKET_CB(skb)->nonce,
					    keypair->receiving_counter.counter);
			goto next;
		}

		if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
			goto next;

		wg_reset_packet(skb, false);
		wg_packet_consume_data_done(peer, skb, &endpoint);
		free = false;

next:
		wg_noise_keypair_put(keypair, false);
		wg_peer_put(peer);
		if (unlikely(free))
			dev_kfree_skb(skb);

		if (++work_done >= budget)
			break;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
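
/* Per-cpu work item that drains the shared decrypt queue: each skb is
 * decrypted in place and marked CRYPTED or DEAD, at which point the owning
 * peer's NAPI poll handler (above) can finish processing it in order.
 */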
void wg_packet_decrypt_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker,
						 work)->ptr;
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		enum packet_state state =
			likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
				PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
		wg_queue_enqueue_per_peer_rx(skb, state);
		if (need_resched())
			cond_resched();
	}
}
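
/* Looks up the keypair for an incoming data message by its receiver index,
 * takes references on the keypair and peer, and queues the skb for parallel
 * decryption followed by in-order per-peer delivery.
 */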
static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
{
	__le32 idx = ((struct message_data *)skb->data)->key_idx;
	struct wg_peer *peer = NULL;
	int ret;

	rcu_read_lock_bh();
	PACKET_CB(skb)->keypair =
		(struct noise_keypair *)wg_index_hashtable_lookup(
			wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
			&peer);
	if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
		goto err_keypair;

	if (unlikely(READ_ONCE(peer->is_dead)))
		goto err;

	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue,
						   &peer->rx_queue, skb,
						   wg->packet_crypt_wq,
						   &wg->decrypt_queue.last_cpu);
	if (unlikely(ret == -EPIPE))
		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
	if (likely(!ret || ret == -EPIPE)) {
		rcu_read_unlock_bh();
		return;
	}
err:
	wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
err_keypair:
	rcu_read_unlock_bh();
	wg_peer_put(peer);
	dev_kfree_skb(skb);
}
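
/* Receive entry point, called from the UDP encapsulation socket for each
 * incoming datagram: validates the headers, then dispatches handshake
 * messages to the handshake queue (dropping them early when it is too full)
 * and data messages to the decryption pipeline.
 */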
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
{
	if (unlikely(prepare_skb_header(skb, wg) < 0))
		goto err;
	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
	case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
		int cpu, ret = -EBUSY;

		if (unlikely(!rng_is_initialized()))
			goto drop;
		if (atomic_read(&wg->handshake_queue_len) >
				MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
			if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
				ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
				spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
			}
		} else {
			ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
		}
		if (ret) {
drop:
			net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
						wg->dev->name, skb);
			goto err;
		}
		atomic_inc(&wg->handshake_queue_len);
		cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
		/* Queues up a call to wg_packet_handshake_receive_worker(): */
		queue_work_on(cpu, wg->handshake_receive_wq,
			      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
		break;
	}
	case cpu_to_le32(MESSAGE_DATA):
		PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		wg_packet_consume_data(wg, skb);
		break;
	default:
		WARN(1, "Non-exhaustive parsing of packet header led to unknown packet type!\n");
		goto err;
	}
	return;

err:
	dev_kfree_skb(skb);
}