tls_main.c

/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");
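
/*
 * Usage sketch (userspace, not part of this file): the "tls" ULP named by
 * MODULE_ALIAS_TCP_ULP() above is attached to an established TCP socket
 * with setsockopt() before any keys are installed. A minimal, hedged
 * example, assuming <netinet/tcp.h> defines TCP_ULP:
 *
 *      setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 * Only after this does the SOL_TLS level (TLS_TX/TLS_RX, handled below)
 * become available on the socket.
 */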

enum {
        TLSV4,
        TLSV6,
        TLS_NUM_PROTS,
};

static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                         const struct proto *base);
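
/* Descriptive note (added): point the socket at the TLS variant of its
 * proto and proto_ops matching the current (tx_conf, rx_conf) pair. The
 * WRITE_ONCE() stores pair with lockless readers of sk->sk_prot, e.g. in
 * sk_clone_lock() and tls_build_proto().
 */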
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

        WRITE_ONCE(sk->sk_prot,
                   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
        WRITE_ONCE(sk->sk_socket->ops,
                   &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
        int rc = 0;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                if (!*timeo) {
                        rc = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        rc = sock_intr_errno(*timeo);
                        break;
                }

                if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
                        break;
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        return rc;
}

int tls_push_sg(struct sock *sk,
                struct tls_context *ctx,
                struct scatterlist *sg,
                u16 first_offset,
                int flags)
{
        int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
        int ret = 0;
        struct page *p;
        size_t size;
        int offset = first_offset;

        size = sg->length - offset;
        offset += sg->offset;

        ctx->in_tcp_sendpages = true;
        while (1) {
                if (sg_is_last(sg))
                        sendpage_flags = flags;

                /* is sending application-limited? */
                tcp_rate_check_app_limited(sk);
                p = sg_page(sg);
retry:
                ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

                if (ret != size) {
                        if (ret > 0) {
                                offset += ret;
                                size -= ret;
                                goto retry;
                        }

                        offset -= sg->offset;
                        ctx->partially_sent_offset = offset;
                        ctx->partially_sent_record = (void *)sg;
                        ctx->in_tcp_sendpages = false;
                        return ret;
                }

                put_page(p);
                sk_mem_uncharge(sk, sg->length);
                sg = sg_next(sg);
                if (!sg)
                        break;

                offset = sg->offset;
                size = sg->length;
        }

        ctx->in_tcp_sendpages = false;

        return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (tls_is_pending_open_record(ctx))
                return ctx->push_pending_record(sk, flags);

        return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
                      unsigned char *record_type)
{
        struct cmsghdr *cmsg;
        int rc = -EINVAL;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
                if (cmsg->cmsg_level != SOL_TLS)
                        continue;

                switch (cmsg->cmsg_type) {
                case TLS_SET_RECORD_TYPE:
                        if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
                                return -EINVAL;

                        if (msg->msg_flags & MSG_MORE)
                                return -EINVAL;

                        rc = tls_handle_open_record(sk, msg->msg_flags);
                        if (rc)
                                return rc;

                        *record_type = *(unsigned char *)CMSG_DATA(cmsg);
                        rc = 0;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return rc;
}
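
/*
 * Usage sketch (userspace, not part of this file): the cmsg parsed above is
 * how an application sends a non-data TLS record, e.g. an alert, through a
 * kTLS socket. A minimal, hedged example; the record-type value 21 (TLS
 * ContentType: alert) and the payload names are illustrative, and error
 * handling is elided:
 *
 *      char buf[CMSG_SPACE(sizeof(unsigned char))];
 *      struct iovec iov = { .iov_base = payload, .iov_len = payload_len };
 *      struct msghdr msg = {
 *              .msg_iov = &iov, .msg_iovlen = 1,
 *              .msg_control = buf, .msg_controllen = sizeof(buf),
 *      };
 *      struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *      cmsg->cmsg_level = SOL_TLS;
 *      cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *      cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *      *CMSG_DATA(cmsg) = 21;
 *      sendmsg(sock, &msg, 0);
 */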

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
                            int flags)
{
        struct scatterlist *sg;
        u16 offset;

        sg = ctx->partially_sent_record;
        offset = ctx->partially_sent_offset;

        ctx->partially_sent_record = NULL;
        return tls_push_sg(sk, ctx, sg, offset, flags);
}

void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
        struct scatterlist *sg;

        for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
                put_page(sg_page(sg));
                sk_mem_uncharge(sk, sg->length);
        }
        ctx->partially_sent_record = NULL;
}

static void tls_write_space(struct sock *sk)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        /* If in_tcp_sendpages is set, call the lower protocol's write_space
         * handler to ensure we wake up any operations waiting there, e.g.
         * if do_tcp_sendpages were to call sk_wait_event.
         */
        if (ctx->in_tcp_sendpages) {
                ctx->sk_write_space(sk);
                return;
        }

#ifdef CONFIG_TLS_DEVICE
        if (ctx->tx_conf == TLS_HW)
                tls_device_write_space(sk, ctx);
        else
#endif
                tls_sw_write_space(sk, ctx);

        ctx->sk_write_space(sk);
}

/**
 * tls_ctx_free() - free TLS ULP context
 * @sk:  socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL, the caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
        if (!ctx)
                return;

        memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
        memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
        mutex_destroy(&ctx->tx_lock);

        if (sk)
                kfree_rcu(ctx, rcu);
        else
                kfree(ctx);
}

static void tls_sk_proto_cleanup(struct sock *sk,
                                 struct tls_context *ctx, long timeo)
{
        if (unlikely(sk->sk_write_pending) &&
            !wait_on_pending_writer(sk, &timeo))
                tls_handle_open_record(sk, 0);

        /* We need these for tls_sw_fallback handling of other packets */
        if (ctx->tx_conf == TLS_SW) {
                kfree(ctx->tx.rec_seq);
                kfree(ctx->tx.iv);
                tls_sw_release_resources_tx(sk);
                TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
        } else if (ctx->tx_conf == TLS_HW) {
                tls_device_free_resources_tx(sk);
                TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
        }

        if (ctx->rx_conf == TLS_SW) {
                tls_sw_release_resources_rx(sk);
                TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
        } else if (ctx->rx_conf == TLS_HW) {
                tls_device_offload_cleanup_rx(sk);
                TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
        }
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx = tls_get_ctx(sk);
        long timeo = sock_sndtimeo(sk, 0);
        bool free_ctx;

        if (ctx->tx_conf == TLS_SW)
                tls_sw_cancel_work_tx(ctx);

        lock_sock(sk);
        free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

        if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
                tls_sk_proto_cleanup(sk, ctx, timeo);

        write_lock_bh(&sk->sk_callback_lock);
        if (free_ctx)
                rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
        WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
        if (sk->sk_write_space == tls_write_space)
                sk->sk_write_space = ctx->sk_write_space;
        write_unlock_bh(&sk->sk_callback_lock);
        release_sock(sk);
        if (ctx->tx_conf == TLS_SW)
                tls_sw_free_ctx_tx(ctx);
        if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
                tls_sw_strparser_done(ctx);
        if (ctx->rx_conf == TLS_SW)
                tls_sw_free_ctx_rx(ctx);
        ctx->sk_proto->close(sk, timeout);

        if (free_ctx)
                tls_ctx_free(sk, ctx);
}

static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
                                  int __user *optlen, int tx)
{
        int rc = 0;
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_crypto_info *crypto_info;
        struct cipher_context *cctx;
        int len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (!optval || (len < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }

        if (!ctx) {
                rc = -EBUSY;
                goto out;
        }

        /* get user crypto info */
        if (tx) {
                crypto_info = &ctx->crypto_send.info;
                cctx = &ctx->tx;
        } else {
                crypto_info = &ctx->crypto_recv.info;
                cctx = &ctx->rx;
        }

        if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
                goto out;
        }

        if (len == sizeof(*crypto_info)) {
                if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
                        rc = -EFAULT;
                goto out;
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                struct tls12_crypto_info_aes_gcm_128 *
                  crypto_info_aes_gcm_128 =
                  container_of(crypto_info,
                               struct tls12_crypto_info_aes_gcm_128,
                               info);

                if (len != sizeof(*crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
                        goto out;
                }
                lock_sock(sk);
                memcpy(crypto_info_aes_gcm_128->iv,
                       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_128_IV_SIZE);
                memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
                release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_128,
                                 sizeof(*crypto_info_aes_gcm_128)))
                        rc = -EFAULT;
                break;
        }
        case TLS_CIPHER_AES_GCM_256: {
                struct tls12_crypto_info_aes_gcm_256 *
                  crypto_info_aes_gcm_256 =
                  container_of(crypto_info,
                               struct tls12_crypto_info_aes_gcm_256,
                               info);

                if (len != sizeof(*crypto_info_aes_gcm_256)) {
                        rc = -EINVAL;
                        goto out;
                }
                lock_sock(sk);
                memcpy(crypto_info_aes_gcm_256->iv,
                       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_256_IV_SIZE);
                memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
                       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
                release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_256,
                                 sizeof(*crypto_info_aes_gcm_256)))
                        rc = -EFAULT;
                break;
        }
        default:
                rc = -EINVAL;
        }

out:
        return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
                             char __user *optval, int __user *optlen)
{
        int rc = 0;

        switch (optname) {
        case TLS_TX:
        case TLS_RX:
                rc = do_tls_getsockopt_conf(sk, optval, optlen,
                                            optname == TLS_TX);
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->sk_proto->getsockopt(sk, level,
                                                 optname, optval, optlen);

        return do_tls_getsockopt(sk, optname, optval, optlen);
}
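
/*
 * Usage sketch (userspace, not part of this file): reading back the live
 * crypto state that do_tls_getsockopt_conf() exports, e.g. to hand the
 * connection back to a user-space TLS stack. Hedged; error handling and
 * the handshake that produced the keys are elided:
 *
 *      struct tls12_crypto_info_aes_gcm_128 info;
 *      socklen_t len = sizeof(info);
 *
 *      getsockopt(sock, SOL_TLS, TLS_TX, &info, &len);
 *      (info.iv and info.rec_seq now hold the current IV and record
 *       sequence number)
 */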

static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
                                  unsigned int optlen, int tx)
{
        struct tls_crypto_info *crypto_info;
        struct tls_crypto_info *alt_crypto_info;
        struct tls_context *ctx = tls_get_ctx(sk);
        size_t optsize;
        int rc = 0;
        int conf;

        if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }

        if (tx) {
                crypto_info = &ctx->crypto_send.info;
                alt_crypto_info = &ctx->crypto_recv.info;
        } else {
                crypto_info = &ctx->crypto_recv.info;
                alt_crypto_info = &ctx->crypto_send.info;
        }

        /* Currently we don't support setting crypto info more than once */
        if (TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
                goto out;
        }

        rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto err_crypto_info;
        }

        /* check version */
        if (crypto_info->version != TLS_1_2_VERSION &&
            crypto_info->version != TLS_1_3_VERSION) {
                rc = -EINVAL;
                goto err_crypto_info;
        }

        /* Ensure that the TLS version and cipher are the same in both directions */
        if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
                if (alt_crypto_info->version != crypto_info->version ||
                    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
                        rc = -EINVAL;
                        goto err_crypto_info;
                }
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
                break;
        case TLS_CIPHER_AES_GCM_256:
                optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
                break;
        case TLS_CIPHER_AES_CCM_128:
                optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
                break;
        default:
                rc = -EINVAL;
                goto err_crypto_info;
        }

        if (optlen != optsize) {
                rc = -EINVAL;
                goto err_crypto_info;
        }

        rc = copy_from_sockptr_offset(crypto_info + 1, optval,
                                      sizeof(*crypto_info),
                                      optlen - sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto err_crypto_info;
        }

        if (tx) {
                rc = tls_set_device_offload(sk, ctx);
                conf = TLS_HW;
                if (!rc) {
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
                } else {
                        rc = tls_set_sw_offload(sk, ctx, 1);
                        if (rc)
                                goto err_crypto_info;
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
                        conf = TLS_SW;
                }
        } else {
                rc = tls_set_device_offload_rx(sk, ctx);
                conf = TLS_HW;
                if (!rc) {
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
                } else {
                        rc = tls_set_sw_offload(sk, ctx, 0);
                        if (rc)
                                goto err_crypto_info;
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
                        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
                        conf = TLS_SW;
                }
                tls_sw_strparser_arm(sk, ctx);
        }

        if (tx)
                ctx->tx_conf = conf;
        else
                ctx->rx_conf = conf;
        update_sk_prot(sk, ctx);
        if (tx) {
                ctx->sk_write_space = sk->sk_write_space;
                sk->sk_write_space = tls_write_space;
        }
        goto out;

err_crypto_info:
        memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
        return rc;
}
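
/*
 * Usage sketch (userspace, not part of this file): installing TX key
 * material on a socket that already has the "tls" ULP attached. A minimal,
 * hedged example for AES-128-GCM under TLS 1.2; key/iv/salt/rec_seq are
 * placeholders taken from the finished handshake, error handling elided:
 *
 *      struct tls12_crypto_info_aes_gcm_128 ci = { 0 };
 *
 *      ci.info.version = TLS_1_2_VERSION;
 *      ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *      memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *      memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *      memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *      memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *      setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *
 * After this, plain send()/write() on the socket produces encrypted TLS
 * records; TLS_RX is configured the same way for the receive path.
 */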

static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
                             unsigned int optlen)
{
        int rc = 0;

        switch (optname) {
        case TLS_TX:
        case TLS_RX:
                lock_sock(sk);
                rc = do_tls_setsockopt_conf(sk, optval, optlen,
                                            optname == TLS_TX);
                release_sock(sk);
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
                          sockptr_t optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->sk_proto->setsockopt(sk, level, optname, optval,
                                                 optlen);

        return do_tls_setsockopt(sk, optname, optval, optlen);
}

struct tls_context *tls_ctx_create(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
        if (!ctx)
                return NULL;

        mutex_init(&ctx->tx_lock);
        rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
        ctx->sk_proto = READ_ONCE(sk->sk_prot);
        ctx->sk = sk;
        return ctx;
}

static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                            const struct proto_ops *base)
{
        ops[TLS_BASE][TLS_BASE] = *base;

        ops[TLS_SW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
        ops[TLS_SW  ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;

        ops[TLS_BASE][TLS_SW  ] = ops[TLS_BASE][TLS_BASE];
        ops[TLS_BASE][TLS_SW  ].splice_read = tls_sw_splice_read;

        ops[TLS_SW  ][TLS_SW  ] = ops[TLS_SW  ][TLS_BASE];
        ops[TLS_SW  ][TLS_SW  ].splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
        ops[TLS_HW  ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
        ops[TLS_HW  ][TLS_BASE].sendpage_locked = NULL;

        ops[TLS_HW  ][TLS_SW  ] = ops[TLS_BASE][TLS_SW  ];
        ops[TLS_HW  ][TLS_SW  ].sendpage_locked = NULL;

        ops[TLS_BASE][TLS_HW  ] = ops[TLS_BASE][TLS_SW  ];

        ops[TLS_SW  ][TLS_HW  ] = ops[TLS_SW  ][TLS_SW  ];

        ops[TLS_HW  ][TLS_HW  ] = ops[TLS_HW  ][TLS_SW  ];
        ops[TLS_HW  ][TLS_HW  ].sendpage_locked = NULL;
#endif
#ifdef CONFIG_TLS_TOE
        ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
#endif
}

static void tls_build_proto(struct sock *sk)
{
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
        struct proto *prot = READ_ONCE(sk->sk_prot);

        /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
        if (ip_ver == TLSV6 &&
            unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
                mutex_lock(&tcpv6_prot_mutex);
                if (likely(prot != saved_tcpv6_prot)) {
                        build_protos(tls_prots[TLSV6], prot);
                        build_proto_ops(tls_proto_ops[TLSV6],
                                        sk->sk_socket->ops);
                        smp_store_release(&saved_tcpv6_prot, prot);
                }
                mutex_unlock(&tcpv6_prot_mutex);
        }

        if (ip_ver == TLSV4 &&
            unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
                mutex_lock(&tcpv4_prot_mutex);
                if (likely(prot != saved_tcpv4_prot)) {
                        build_protos(tls_prots[TLSV4], prot);
                        build_proto_ops(tls_proto_ops[TLSV4],
                                        sk->sk_socket->ops);
                        smp_store_release(&saved_tcpv4_prot, prot);
                }
                mutex_unlock(&tcpv4_prot_mutex);
        }
}
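
/* Descriptive note (added): the proto variants below are indexed as
 * prot[tx_conf][rx_conf]. Each (TX, RX) configuration pair (TLS_BASE,
 * TLS_SW, TLS_HW) gets its own copy of the base TCP proto with only the
 * relevant callbacks overridden, so update_sk_prot() can switch a socket's
 * behaviour with a single pointer store.
 */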
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
                         const struct proto *base)
{
        prot[TLS_BASE][TLS_BASE] = *base;
        prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
        prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
        prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

        prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
        prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
        prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

        prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
        prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
        prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
        prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

        prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
        prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
        prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
        prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
        prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
        prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
        prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

        prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
        prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
        prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

        prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

        prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

        prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
        prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
        prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_toe_hash;
        prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_toe_unhash;
#endif
}

static int tls_init(struct sock *sk)
{
        struct tls_context *ctx;
        int rc = 0;

        tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
        if (tls_toe_bypass(sk))
                return 0;
#endif

        /* The TLS ulp is currently supported only for TCP sockets
         * in ESTABLISHED state.
         * Supporting sockets in LISTEN state will require us
         * to modify the accept implementation to clone rather than
         * share the ulp context.
         */
        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTCONN;

        /* allocate tls context */
        write_lock_bh(&sk->sk_callback_lock);
        ctx = tls_ctx_create(sk);
        if (!ctx) {
                rc = -ENOMEM;
                goto out;
        }

        ctx->tx_conf = TLS_BASE;
        ctx->rx_conf = TLS_BASE;
        update_sk_prot(sk, ctx);
out:
        write_unlock_bh(&sk->sk_callback_lock);
        return rc;
}

static void tls_update(struct sock *sk, struct proto *p,
                       void (*write_space)(struct sock *sk))
{
        struct tls_context *ctx;

        ctx = tls_get_ctx(sk);
        if (likely(ctx)) {
                ctx->sk_write_space = write_space;
                ctx->sk_proto = p;
        } else {
                /* Pairs with lockless read in sk_clone_lock(). */
                WRITE_ONCE(sk->sk_prot, p);
                sk->sk_write_space = write_space;
        }
}

static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
        u16 version, cipher_type;
        struct tls_context *ctx;
        struct nlattr *start;
        int err;

        start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
        if (!start)
                return -EMSGSIZE;

        rcu_read_lock();
        ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
        if (!ctx) {
                err = 0;
                goto nla_failure;
        }
        version = ctx->prot_info.version;
        if (version) {
                err = nla_put_u16(skb, TLS_INFO_VERSION, version);
                if (err)
                        goto nla_failure;
        }
        cipher_type = ctx->prot_info.cipher_type;
        if (cipher_type) {
                err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
                if (err)
                        goto nla_failure;
        }
        err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
        if (err)
                goto nla_failure;

        err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
        if (err)
                goto nla_failure;

        rcu_read_unlock();
        nla_nest_end(skb, start);
        return 0;

nla_failure:
        rcu_read_unlock();
        nla_nest_cancel(skb, start);
        return err;
}

static size_t tls_get_info_size(const struct sock *sk)
{
        size_t size = 0;

        size += nla_total_size(0) +             /* INET_ULP_INFO_TLS */
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_VERSION */
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_CIPHER */
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_RXCONF */
                nla_total_size(sizeof(u16)) +   /* TLS_INFO_TXCONF */
                0;

        return size;
}

static int __net_init tls_init_net(struct net *net)
{
        int err;

        net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
        if (!net->mib.tls_statistics)
                return -ENOMEM;

        err = tls_proc_init(net);
        if (err)
                goto err_free_stats;

        return 0;
err_free_stats:
        free_percpu(net->mib.tls_statistics);
        return err;
}

static void __net_exit tls_exit_net(struct net *net)
{
        tls_proc_fini(net);
        free_percpu(net->mib.tls_statistics);
}

static struct pernet_operations tls_proc_ops = {
        .init = tls_init_net,
        .exit = tls_exit_net,
};

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
        .name                   = "tls",
        .owner                  = THIS_MODULE,
        .init                   = tls_init,
        .update                 = tls_update,
        .get_info               = tls_get_info,
        .get_info_size          = tls_get_info_size,
};

static int __init tls_register(void)
{
        int err;

        err = register_pernet_subsys(&tls_proc_ops);
        if (err)
                return err;

        tls_device_init();
        tcp_register_ulp(&tcp_tls_ulp_ops);

        return 0;
}

static void __exit tls_unregister(void)
{
        tcp_unregister_ulp(&tcp_tls_ulp_ops);
        tls_device_cleanup();
        unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);