noise.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "noise.h"
#include "device.h"
#include "peer.h"
#include "messages.h"
#include "queueing.h"
#include "peerlookup.h"

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <crypto/algapi.h>

/* This implements Noise_IKpsk2:
 *
 * <- s
 * ******
 * -> e, es, s, ss, {t}
 * <- e, ee, se, psk, {}
 */

static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com";
static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
static atomic64_t keypair_counter = ATOMIC64_INIT(0);

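/* Precompute the values that seed every handshake:
 *
 *   chaining_key = BLAKE2s(handshake_name)
 *   hash         = BLAKE2s(chaining_key || identifier_name)
 *
 * Both are written once at module init and are read-only afterwards.
 */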
void __init wg_noise_init(void)
{
        struct blake2s_state blake;

        blake2s(handshake_init_chaining_key, handshake_name, NULL,
                NOISE_HASH_LEN, sizeof(handshake_name), 0);
        blake2s_init(&blake, NOISE_HASH_LEN);
        blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN);
        blake2s_update(&blake, identifier_name, sizeof(identifier_name));
        blake2s_final(&blake, handshake_init_hash);
}

/* Must hold peer->handshake.static_identity->lock */
void wg_noise_precompute_static_static(struct wg_peer *peer)
{
        down_write(&peer->handshake.lock);
        if (!peer->handshake.static_identity->has_identity ||
            !curve25519(peer->handshake.precomputed_static_static,
                        peer->handshake.static_identity->static_private,
                        peer->handshake.remote_static))
                memset(peer->handshake.precomputed_static_static, 0,
                       NOISE_PUBLIC_KEY_LEN);
        up_write(&peer->handshake.lock);
}

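/* Bind a handshake to its peer, local static identity, remote public key,
 * and optional preshared key, leaving it in the HANDSHAKE_ZEROED state and
 * precomputing the static-static DH result used by the "ss" mixes below.
 */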
void wg_noise_handshake_init(struct noise_handshake *handshake,
                             struct noise_static_identity *static_identity,
                             const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
                             const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
                             struct wg_peer *peer)
{
        memset(handshake, 0, sizeof(*handshake));
        init_rwsem(&handshake->lock);
        handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE;
        handshake->entry.peer = peer;
        memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN);
        if (peer_preshared_key)
                memcpy(handshake->preshared_key, peer_preshared_key,
                       NOISE_SYMMETRIC_KEY_LEN);
        handshake->static_identity = static_identity;
        handshake->state = HANDSHAKE_ZEROED;
        wg_noise_precompute_static_static(peer);
}

static void handshake_zero(struct noise_handshake *handshake)
{
        memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN);
        memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN);
        memset(&handshake->hash, 0, NOISE_HASH_LEN);
        memset(&handshake->chaining_key, 0, NOISE_HASH_LEN);
        handshake->remote_index = 0;
        handshake->state = HANDSHAKE_ZEROED;
}

void wg_noise_handshake_clear(struct noise_handshake *handshake)
{
        down_write(&handshake->lock);
        wg_index_hashtable_remove(
                handshake->entry.peer->device->index_hashtable,
                &handshake->entry);
        handshake_zero(handshake);
        up_write(&handshake->lock);
}

static struct noise_keypair *keypair_create(struct wg_peer *peer)
{
        struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL);

        if (unlikely(!keypair))
                return NULL;
        spin_lock_init(&keypair->receiving_counter.lock);
        keypair->internal_id = atomic64_inc_return(&keypair_counter);
        keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
        keypair->entry.peer = peer;
        kref_init(&keypair->refcount);
        return keypair;
}

static void keypair_free_rcu(struct rcu_head *rcu)
{
        kfree_sensitive(container_of(rcu, struct noise_keypair, rcu));
}

static void keypair_free_kref(struct kref *kref)
{
        struct noise_keypair *keypair =
                container_of(kref, struct noise_keypair, refcount);

        net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n",
                            keypair->entry.peer->device->dev->name,
                            keypair->internal_id,
                            keypair->entry.peer->internal_id);
        wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable,
                                  &keypair->entry);
        call_rcu(&keypair->rcu, keypair_free_rcu);
}

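/* Drop a reference to a keypair. With unreference_now, the keypair is also
 * unlinked from the index hashtable right away, so incoming packets can no
 * longer look it up; the memory itself is only freed by kfree_sensitive()
 * after an RCU grace period, via keypair_free_rcu() above.
 */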
void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
{
        if (unlikely(!keypair))
                return;
        if (unlikely(unreference_now))
                wg_index_hashtable_remove(
                        keypair->entry.peer->device->index_hashtable,
                        &keypair->entry);
        kref_put(&keypair->refcount, keypair_free_kref);
}

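/* Take a reference, which may only be attempted under the RCU BH read lock.
 * A typical caller pattern (a sketch, not a verbatim excerpt from the rest of
 * the driver) would be:
 *
 *      rcu_read_lock_bh();
 *      keypair = wg_noise_keypair_get(
 *              rcu_dereference_bh(keypairs->current_keypair));
 *      rcu_read_unlock_bh();
 *      if (keypair) {
 *              ...use keypair...
 *              wg_noise_keypair_put(keypair, false);
 *      }
 */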
struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
                "Taking noise keypair reference without holding the RCU BH read lock");
        if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount)))
                return NULL;
        return keypair;
}

void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
{
        struct noise_keypair *old;

        spin_lock_bh(&keypairs->keypair_update_lock);

        /* We zero the next_keypair before zeroing the others, so that
         * wg_noise_received_with_keypair returns early before subsequent ones
         * are zeroed.
         */
        old = rcu_dereference_protected(keypairs->next_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        RCU_INIT_POINTER(keypairs->next_keypair, NULL);
        wg_noise_keypair_put(old, true);

        old = rcu_dereference_protected(keypairs->previous_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
        wg_noise_keypair_put(old, true);

        old = rcu_dereference_protected(keypairs->current_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        RCU_INIT_POINTER(keypairs->current_keypair, NULL);
        wg_noise_keypair_put(old, true);

        spin_unlock_bh(&keypairs->keypair_update_lock);
}

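/* Invalidate the sending halves of the current and next keypairs and wipe the
 * in-progress handshake, so a fresh handshake is required before this peer
 * can transmit again.
 */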
void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer)
{
        struct noise_keypair *keypair;

        wg_noise_handshake_clear(&peer->handshake);
        wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);

        spin_lock_bh(&peer->keypairs.keypair_update_lock);
        keypair = rcu_dereference_protected(peer->keypairs.next_keypair,
                lockdep_is_held(&peer->keypairs.keypair_update_lock));
        if (keypair)
                keypair->sending.is_valid = false;
        keypair = rcu_dereference_protected(peer->keypairs.current_keypair,
                lockdep_is_held(&peer->keypairs.keypair_update_lock));
        if (keypair)
                keypair->sending.is_valid = false;
        spin_unlock_bh(&peer->keypairs.keypair_update_lock);
}

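/* Slot a freshly negotiated keypair into the previous/current/next rotation.
 * Where it lands depends on who initiated: an initiator's keypair is already
 * confirmed by the handshake response and becomes current immediately, while
 * a responder's keypair must wait in the next slot until the first data
 * packet confirms it.
 */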
static void add_new_keypair(struct noise_keypairs *keypairs,
                            struct noise_keypair *new_keypair)
{
        struct noise_keypair *previous_keypair, *next_keypair, *current_keypair;

        spin_lock_bh(&keypairs->keypair_update_lock);
        previous_keypair = rcu_dereference_protected(keypairs->previous_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        next_keypair = rcu_dereference_protected(keypairs->next_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        current_keypair = rcu_dereference_protected(keypairs->current_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        if (new_keypair->i_am_the_initiator) {
                /* If we're the initiator, it means we've sent a handshake, and
                 * received a confirmation response, which means this new
                 * keypair can now be used.
                 */
                if (next_keypair) {
                        /* If there already was a next keypair pending, we
                         * demote it to be the previous keypair, and free the
                         * existing current. Note that this means KCI can result
                         * in this transition. It would perhaps be more sound to
                         * always just get rid of the unused next keypair
                         * instead of putting it in the previous slot, but this
                         * might be a bit less robust. Something to think about
                         * for the future.
                         */
                        RCU_INIT_POINTER(keypairs->next_keypair, NULL);
                        rcu_assign_pointer(keypairs->previous_keypair,
                                           next_keypair);
                        wg_noise_keypair_put(current_keypair, true);
                } else /* If there wasn't an existing next keypair, we replace
                        * the previous with the current one.
                        */
                        rcu_assign_pointer(keypairs->previous_keypair,
                                           current_keypair);
                /* At this point we can get rid of the old previous keypair, and
                 * set up the new keypair.
                 */
                wg_noise_keypair_put(previous_keypair, true);
                rcu_assign_pointer(keypairs->current_keypair, new_keypair);
        } else {
                /* If we're the responder, it means we can't use the new keypair
                 * until we receive confirmation via the first data packet, so
                 * we get rid of the existing previous one, the possibly
                 * existing next one, and slide in the new next one.
                 */
                rcu_assign_pointer(keypairs->next_keypair, new_keypair);
                wg_noise_keypair_put(next_keypair, true);
                RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
                wg_noise_keypair_put(previous_keypair, true);
        }
        spin_unlock_bh(&keypairs->keypair_update_lock);
}

bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
                                    struct noise_keypair *received_keypair)
{
        struct noise_keypair *old_keypair;
        bool key_is_new;

        /* We first check without taking the spinlock. */
        key_is_new = received_keypair ==
                     rcu_access_pointer(keypairs->next_keypair);
        if (likely(!key_is_new))
                return false;

        spin_lock_bh(&keypairs->keypair_update_lock);
        /* After locking, we double check that things didn't change from
         * beneath us.
         */
        if (unlikely(received_keypair !=
                     rcu_dereference_protected(keypairs->next_keypair,
                             lockdep_is_held(&keypairs->keypair_update_lock)))) {
                spin_unlock_bh(&keypairs->keypair_update_lock);
                return false;
        }

        /* When we've finally received the confirmation, we slide the next
         * into the current, the current into the previous, and get rid of
         * the old previous.
         */
        old_keypair = rcu_dereference_protected(keypairs->previous_keypair,
                lockdep_is_held(&keypairs->keypair_update_lock));
        rcu_assign_pointer(keypairs->previous_keypair,
                rcu_dereference_protected(keypairs->current_keypair,
                        lockdep_is_held(&keypairs->keypair_update_lock)));
        wg_noise_keypair_put(old_keypair, true);
        rcu_assign_pointer(keypairs->current_keypair, received_keypair);
        RCU_INIT_POINTER(keypairs->next_keypair, NULL);

        spin_unlock_bh(&keypairs->keypair_update_lock);
        return true;
}

/* Must hold static_identity->lock */
void wg_noise_set_static_identity_private_key(
        struct noise_static_identity *static_identity,
        const u8 private_key[NOISE_PUBLIC_KEY_LEN])
{
        memcpy(static_identity->static_private, private_key,
               NOISE_PUBLIC_KEY_LEN);
        curve25519_clamp_secret(static_identity->static_private);
        static_identity->has_identity = curve25519_generate_public(
                static_identity->static_public, private_key);
}

/* This is Hugo Krawczyk's HKDF:
 *  - https://eprint.iacr.org/2010/264.pdf
 *  - https://tools.ietf.org/html/rfc5869
 */
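/* Concretely, with BLAKE2s-HMAC, chaining_key as the extract key, and at most
 * three expand steps:
 *
 *   secret = HMAC(chaining_key, data)          (extract)
 *   T1     = HMAC(secret, 0x1)                 -> first_dst
 *   T2     = HMAC(secret, T1 || 0x2)           -> second_dst
 *   T3     = HMAC(secret, T2 || 0x3)           -> third_dst
 */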
static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
                size_t first_len, size_t second_len, size_t third_len,
                size_t data_len, const u8 chaining_key[NOISE_HASH_LEN])
{
        u8 output[BLAKE2S_HASH_SIZE + 1];
        u8 secret[BLAKE2S_HASH_SIZE];

        WARN_ON(IS_ENABLED(DEBUG) &&
                (first_len > BLAKE2S_HASH_SIZE ||
                 second_len > BLAKE2S_HASH_SIZE ||
                 third_len > BLAKE2S_HASH_SIZE ||
                 ((second_len || second_dst || third_len || third_dst) &&
                  (!first_len || !first_dst)) ||
                 ((third_len || third_dst) && (!second_len || !second_dst))));

        /* Extract entropy from data into secret */
        blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);

        if (!first_dst || !first_len)
                goto out;

        /* Expand first key: key = secret, data = 0x1 */
        output[0] = 1;
        blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
        memcpy(first_dst, output, first_len);

        if (!second_dst || !second_len)
                goto out;

        /* Expand second key: key = secret, data = first-key || 0x2 */
        output[BLAKE2S_HASH_SIZE] = 2;
        blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
                        BLAKE2S_HASH_SIZE);
        memcpy(second_dst, output, second_len);

        if (!third_dst || !third_len)
                goto out;

        /* Expand third key: key = secret, data = second-key || 0x3 */
        output[BLAKE2S_HASH_SIZE] = 3;
        blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
                        BLAKE2S_HASH_SIZE);
        memcpy(third_dst, output, third_len);

out:
        /* Clear sensitive data from stack */
        memzero_explicit(secret, BLAKE2S_HASH_SIZE);
        memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
}

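/* Split the final chaining key into the two transport-data keys and stamp
 * their shared birthdate. The initiator and responder call this with the
 * sending/receiving destinations swapped, so first_dst is always the key for
 * the initiator-to-responder direction.
 */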
static void derive_keys(struct noise_symmetric_key *first_dst,
                        struct noise_symmetric_key *second_dst,
                        const u8 chaining_key[NOISE_HASH_LEN])
{
        u64 birthdate = ktime_get_coarse_boottime_ns();

        kdf(first_dst->key, second_dst->key, NULL, NULL,
            NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
            chaining_key);
        first_dst->birthdate = second_dst->birthdate = birthdate;
        first_dst->is_valid = second_dst->is_valid = true;
}

static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
                                u8 key[NOISE_SYMMETRIC_KEY_LEN],
                                const u8 private[NOISE_PUBLIC_KEY_LEN],
                                const u8 public[NOISE_PUBLIC_KEY_LEN])
{
        u8 dh_calculation[NOISE_PUBLIC_KEY_LEN];

        if (unlikely(!curve25519(dh_calculation, private, public)))
                return false;
        kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN,
            NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key);
        memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN);
        return true;
}

static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN],
                                            u8 key[NOISE_SYMMETRIC_KEY_LEN],
                                            const u8 precomputed[NOISE_PUBLIC_KEY_LEN])
{
        static u8 zero_point[NOISE_PUBLIC_KEY_LEN];

        if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN)))
                return false;
        kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN,
            NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
            chaining_key);
        return true;
}

static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
{
        struct blake2s_state blake;

        blake2s_init(&blake, NOISE_HASH_LEN);
        blake2s_update(&blake, hash, NOISE_HASH_LEN);
        blake2s_update(&blake, src, src_len);
        blake2s_final(&blake, hash);
}

static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN],
                    u8 key[NOISE_SYMMETRIC_KEY_LEN],
                    const u8 psk[NOISE_SYMMETRIC_KEY_LEN])
{
        u8 temp_hash[NOISE_HASH_LEN];

        kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN,
            NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key);
        mix_hash(hash, temp_hash, NOISE_HASH_LEN);
        memzero_explicit(temp_hash, NOISE_HASH_LEN);
}

static void handshake_init(u8 chaining_key[NOISE_HASH_LEN],
                           u8 hash[NOISE_HASH_LEN],
                           const u8 remote_static[NOISE_PUBLIC_KEY_LEN])
{
        memcpy(hash, handshake_init_hash, NOISE_HASH_LEN);
        memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN);
        mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN);
}

static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext,
                            size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
                            u8 hash[NOISE_HASH_LEN])
{
        chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash,
                                 NOISE_HASH_LEN,
                                 0 /* Always zero for Noise_IK */, key);
        mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len));
}

static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext,
                            size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
                            u8 hash[NOISE_HASH_LEN])
{
        if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len,
                                      hash, NOISE_HASH_LEN,
                                      0 /* Always zero for Noise_IK */, key))
                return false;
        mix_hash(hash, src_ciphertext, src_len);
        return true;
}

static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN],
                              const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN],
                              u8 chaining_key[NOISE_HASH_LEN],
                              u8 hash[NOISE_HASH_LEN])
{
        if (ephemeral_dst != ephemeral_src)
                memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
        mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
        kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0,
            NOISE_PUBLIC_KEY_LEN, chaining_key);
}

static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN])
{
        struct timespec64 now;

        ktime_get_real_ts64(&now);

        /* In order to prevent some sort of infoleak from precise timers, we
         * round down the nanoseconds part to the closest rounded-down power of
         * two to the maximum initiations per second allowed anyway by the
         * implementation.
         */
        now.tv_nsec = ALIGN_DOWN(now.tv_nsec,
                rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND));

        /* https://cr.yp.to/libtai/tai64.html */
        *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec);
        *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec);
}

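/* Build the first handshake message, "-> e, es, s, ss, {t}" in the pattern at
 * the top of this file: a fresh ephemeral key, two DH mixes, our encrypted
 * static public key, and an encrypted TAI64N timestamp that the responder
 * uses for replay protection.
 */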
bool
wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
                                     struct noise_handshake *handshake)
{
        u8 timestamp[NOISE_TIMESTAMP_LEN];
        u8 key[NOISE_SYMMETRIC_KEY_LEN];
        bool ret = false;

        /* We need to wait for crng _before_ taking any locks, since
         * curve25519_generate_secret uses get_random_bytes_wait.
         */
        wait_for_random_bytes();

        down_read(&handshake->static_identity->lock);
        down_write(&handshake->lock);

        if (unlikely(!handshake->static_identity->has_identity))
                goto out;

        dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION);

        handshake_init(handshake->chaining_key, handshake->hash,
                       handshake->remote_static);

        /* e */
        curve25519_generate_secret(handshake->ephemeral_private);
        if (!curve25519_generate_public(dst->unencrypted_ephemeral,
                                        handshake->ephemeral_private))
                goto out;
        message_ephemeral(dst->unencrypted_ephemeral,
                          dst->unencrypted_ephemeral, handshake->chaining_key,
                          handshake->hash);

        /* es */
        if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private,
                    handshake->remote_static))
                goto out;

        /* s */
        message_encrypt(dst->encrypted_static,
                        handshake->static_identity->static_public,
                        NOISE_PUBLIC_KEY_LEN, key, handshake->hash);

        /* ss */
        if (!mix_precomputed_dh(handshake->chaining_key, key,
                                handshake->precomputed_static_static))
                goto out;

        /* {t} */
        tai64n_now(timestamp);
        message_encrypt(dst->encrypted_timestamp, timestamp,
                        NOISE_TIMESTAMP_LEN, key, handshake->hash);

        dst->sender_index = wg_index_hashtable_insert(
                handshake->entry.peer->device->index_hashtable,
                &handshake->entry);

        handshake->state = HANDSHAKE_CREATED_INITIATION;
        ret = true;

out:
        up_write(&handshake->lock);
        up_read(&handshake->static_identity->lock);
        memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
        return ret;
}

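/* Consume an initiation on the responder side: replay the mixes above with
 * our static private key, look up the peer by its decrypted static key, and
 * reject the message if the timestamp is not strictly newer than the last
 * one seen or if initiations arrive faster than INITIATIONS_PER_SECOND.
 */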
struct wg_peer *
wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
                                      struct wg_device *wg)
{
        struct wg_peer *peer = NULL, *ret_peer = NULL;
        struct noise_handshake *handshake;
        bool replay_attack, flood_attack;
        u8 key[NOISE_SYMMETRIC_KEY_LEN];
        u8 chaining_key[NOISE_HASH_LEN];
        u8 hash[NOISE_HASH_LEN];
        u8 s[NOISE_PUBLIC_KEY_LEN];
        u8 e[NOISE_PUBLIC_KEY_LEN];
        u8 t[NOISE_TIMESTAMP_LEN];
        u64 initiation_consumption;

        down_read(&wg->static_identity.lock);
        if (unlikely(!wg->static_identity.has_identity))
                goto out;

        handshake_init(chaining_key, hash, wg->static_identity.static_public);

        /* e */
        message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);

        /* es */
        if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e))
                goto out;

        /* s */
        if (!message_decrypt(s, src->encrypted_static,
                             sizeof(src->encrypted_static), key, hash))
                goto out;

        /* Lookup which peer we're actually talking to */
        peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s);
        if (!peer)
                goto out;
        handshake = &peer->handshake;

        /* ss */
        if (!mix_precomputed_dh(chaining_key, key,
                                handshake->precomputed_static_static))
                goto out;

        /* {t} */
        if (!message_decrypt(t, src->encrypted_timestamp,
                             sizeof(src->encrypted_timestamp), key, hash))
                goto out;

        down_read(&handshake->lock);
        replay_attack = memcmp(t, handshake->latest_timestamp,
                               NOISE_TIMESTAMP_LEN) <= 0;
        flood_attack = (s64)handshake->last_initiation_consumption +
                       NSEC_PER_SEC / INITIATIONS_PER_SECOND >
                       (s64)ktime_get_coarse_boottime_ns();
        up_read(&handshake->lock);
        if (replay_attack || flood_attack)
                goto out;

        /* Success! Copy everything to peer */
        down_write(&handshake->lock);
        memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
        if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0)
                memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN);
        memcpy(handshake->hash, hash, NOISE_HASH_LEN);
        memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
        handshake->remote_index = src->sender_index;
        initiation_consumption = ktime_get_coarse_boottime_ns();
        if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0)
                handshake->last_initiation_consumption = initiation_consumption;
        handshake->state = HANDSHAKE_CONSUMED_INITIATION;
        up_write(&handshake->lock);
        ret_peer = peer;

out:
        memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
        memzero_explicit(hash, NOISE_HASH_LEN);
        memzero_explicit(chaining_key, NOISE_HASH_LEN);
        up_read(&wg->static_identity.lock);
        if (!ret_peer)
                wg_peer_put(peer);
        return ret_peer;
}

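/* Build the second handshake message, "<- e, ee, se, psk, {}": a fresh
 * ephemeral key, two more DH mixes, the preshared key, and an empty payload
 * whose authentication tag proves the responder derived the same chaining
 * key.
 */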
bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
                                        struct noise_handshake *handshake)
{
        u8 key[NOISE_SYMMETRIC_KEY_LEN];
        bool ret = false;

        /* We need to wait for crng _before_ taking any locks, since
         * curve25519_generate_secret uses get_random_bytes_wait.
         */
        wait_for_random_bytes();

        down_read(&handshake->static_identity->lock);
        down_write(&handshake->lock);

        if (handshake->state != HANDSHAKE_CONSUMED_INITIATION)
                goto out;

        dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE);
        dst->receiver_index = handshake->remote_index;

        /* e */
        curve25519_generate_secret(handshake->ephemeral_private);
        if (!curve25519_generate_public(dst->unencrypted_ephemeral,
                                        handshake->ephemeral_private))
                goto out;
        message_ephemeral(dst->unencrypted_ephemeral,
                          dst->unencrypted_ephemeral, handshake->chaining_key,
                          handshake->hash);

        /* ee */
        if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
                    handshake->remote_ephemeral))
                goto out;

        /* se */
        if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
                    handshake->remote_static))
                goto out;

        /* psk */
        mix_psk(handshake->chaining_key, handshake->hash, key,
                handshake->preshared_key);

        /* {} */
        message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash);

        dst->sender_index = wg_index_hashtable_insert(
                handshake->entry.peer->device->index_hashtable,
                &handshake->entry);

        handshake->state = HANDSHAKE_CREATED_RESPONSE;
        ret = true;

out:
        up_write(&handshake->lock);
        up_read(&handshake->static_identity->lock);
        memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
        return ret;
}

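/* Consume the response on the initiator side. The handshake state is copied
 * to the stack under a read lock so the expensive crypto runs unlocked, and
 * the final commit re-checks that the state has not changed in the meantime.
 */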
struct wg_peer *
wg_noise_handshake_consume_response(struct message_handshake_response *src,
                                    struct wg_device *wg)
{
        enum noise_handshake_state state = HANDSHAKE_ZEROED;
        struct wg_peer *peer = NULL, *ret_peer = NULL;
        struct noise_handshake *handshake;
        u8 key[NOISE_SYMMETRIC_KEY_LEN];
        u8 hash[NOISE_HASH_LEN];
        u8 chaining_key[NOISE_HASH_LEN];
        u8 e[NOISE_PUBLIC_KEY_LEN];
        u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
        u8 static_private[NOISE_PUBLIC_KEY_LEN];
        u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];

        down_read(&wg->static_identity.lock);

        if (unlikely(!wg->static_identity.has_identity))
                goto out;

        handshake = (struct noise_handshake *)wg_index_hashtable_lookup(
                wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE,
                src->receiver_index, &peer);
        if (unlikely(!handshake))
                goto out;

        down_read(&handshake->lock);
        state = handshake->state;
        memcpy(hash, handshake->hash, NOISE_HASH_LEN);
        memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
        memcpy(ephemeral_private, handshake->ephemeral_private,
               NOISE_PUBLIC_KEY_LEN);
        memcpy(preshared_key, handshake->preshared_key,
               NOISE_SYMMETRIC_KEY_LEN);
        up_read(&handshake->lock);

        if (state != HANDSHAKE_CREATED_INITIATION)
                goto fail;

        /* e */
        message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);

        /* ee */
        if (!mix_dh(chaining_key, NULL, ephemeral_private, e))
                goto fail;

        /* se */
        if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e))
                goto fail;

        /* psk */
        mix_psk(chaining_key, hash, key, preshared_key);

        /* {} */
        if (!message_decrypt(NULL, src->encrypted_nothing,
                             sizeof(src->encrypted_nothing), key, hash))
                goto fail;

        /* Success! Copy everything to peer */
        down_write(&handshake->lock);
        /* It's important to check that the state is still the same, while we
         * have an exclusive lock.
         */
        if (handshake->state != state) {
                up_write(&handshake->lock);
                goto fail;
        }
        memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
        memcpy(handshake->hash, hash, NOISE_HASH_LEN);
        memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
        handshake->remote_index = src->sender_index;
        handshake->state = HANDSHAKE_CONSUMED_RESPONSE;
        up_write(&handshake->lock);
        ret_peer = peer;
        goto out;

fail:
        wg_peer_put(peer);
out:
        memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
        memzero_explicit(hash, NOISE_HASH_LEN);
        memzero_explicit(chaining_key, NOISE_HASH_LEN);
        memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
        memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
        memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
        up_read(&wg->static_identity.lock);
        return ret_peer;
}

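/* Promote a completed handshake into a transport keypair: derive the
 * sending and receiving keys from the final chaining key, wipe the handshake
 * state, and install the keypair in place of the handshake's index-hashtable
 * entry, unless the peer is already being torn down.
 */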
bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
                                      struct noise_keypairs *keypairs)
{
        struct noise_keypair *new_keypair;
        bool ret = false;

        down_write(&handshake->lock);
        if (handshake->state != HANDSHAKE_CREATED_RESPONSE &&
            handshake->state != HANDSHAKE_CONSUMED_RESPONSE)
                goto out;

        new_keypair = keypair_create(handshake->entry.peer);
        if (!new_keypair)
                goto out;
        new_keypair->i_am_the_initiator = handshake->state ==
                                          HANDSHAKE_CONSUMED_RESPONSE;
        new_keypair->remote_index = handshake->remote_index;

        if (new_keypair->i_am_the_initiator)
                derive_keys(&new_keypair->sending, &new_keypair->receiving,
                            handshake->chaining_key);
        else
                derive_keys(&new_keypair->receiving, &new_keypair->sending,
                            handshake->chaining_key);

        handshake_zero(handshake);
        rcu_read_lock_bh();
        if (likely(!READ_ONCE(container_of(handshake, struct wg_peer,
                                           handshake)->is_dead))) {
                add_new_keypair(keypairs, new_keypair);
                net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n",
                                    handshake->entry.peer->device->dev->name,
                                    new_keypair->internal_id,
                                    handshake->entry.peer->internal_id);
                ret = wg_index_hashtable_replace(
                        handshake->entry.peer->device->index_hashtable,
                        &handshake->entry, &new_keypair->entry);
        } else {
                kfree_sensitive(new_keypair);
        }
        rcu_read_unlock_bh();

out:
        up_write(&handshake->lock);
        return ret;
}