conn_event.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Retransmit terminal ACK or ABORT of the previous call.
 */
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
				       struct sk_buff *skb,
				       unsigned int channel)
{
	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
	struct rxrpc_channel *chan;
	struct msghdr msg;
	struct kvec iov[3];
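	/* The wire header and the terminal ACK or ABORT body are built in a
	 * single packed buffer so that they can be sent contiguously as
	 * iov[0].
	 */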
	struct {
		struct rxrpc_wire_header whdr;
		union {
			__be32 abort_code;
			struct rxrpc_ackpacket ack;
		};
	} __attribute__((packed)) pkt;
	struct rxrpc_ackinfo ack_info;
	size_t len;
	int ret, ioc;
	u32 serial, mtu, call_id, padding;

	_enter("%d", conn->debug_id);

	chan = &conn->channels[channel];

	/* If the last call got moved on whilst we were waiting to run, just
	 * ignore this packet.
	 */
	call_id = READ_ONCE(chan->last_call);
	/* Sync with __rxrpc_disconnect_call() */
	smp_rmb();
	if (skb && call_id != sp->hdr.callNumber)
		return;

	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

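	/* For an ACK, iov[1] supplies three bytes of zero padding after the
	 * ACK body and iov[2] appends the trailing ACK-info block; an ABORT
	 * goes out as iov[0] alone.
	 */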
	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt.whdr);
	iov[1].iov_base = &padding;
	iov[1].iov_len = 3;
	iov[2].iov_base = &ack_info;
	iov[2].iov_len = sizeof(ack_info);

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(conn->proto.cid | channel);
	pkt.whdr.callNumber = htonl(call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = chan->last_type;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = conn->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(conn->service_id);

	len = sizeof(pkt.whdr);
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		pkt.abort_code = htonl(chan->last_abort);
		iov[0].iov_len += sizeof(pkt.abort_code);
		len += sizeof(pkt.abort_code);
		ioc = 1;
		break;

	case RXRPC_PACKET_TYPE_ACK:
		mtu = conn->params.peer->if_mtu;
		mtu -= conn->params.peer->hdrsize;
		pkt.ack.bufferSpace = 0;
		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
		pkt.ack.previousPacket = htonl(chan->last_seq);
		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
		pkt.ack.nAcks = 0;
		ack_info.rxMTU = htonl(rxrpc_rx_mtu);
		ack_info.maxMTU = htonl(mtu);
		ack_info.rwind = htonl(rxrpc_rx_window_size);
		ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
		pkt.whdr.flags |= RXRPC_SLOW_START_OK;
		padding = 0;
		iov[0].iov_len += sizeof(pkt.ack);
		len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
		ioc = 3;
		break;

	default:
		return;
	}

	/* Resync with __rxrpc_disconnect_call() and check that the last call
	 * didn't get advanced whilst we were filling out the packets.
	 */
	smp_rmb();
	if (READ_ONCE(chan->last_call) != call_id)
		return;

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
		break;
	case RXRPC_PACKET_TYPE_ACK:
		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
				   ntohl(pkt.ack.firstPacket),
				   ntohl(pkt.ack.serial),
				   pkt.ack.reason, 0);
		_proto("Tx ACK %%%u [re]", serial);
		break;
	}

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);
	else
		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_final_resend);

	_leave("");
}

/*
 * pass a connection-level abort onto all calls on that connection
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
			      enum rxrpc_call_completion compl,
			      rxrpc_serial_t serial)
{
	struct rxrpc_call *call;
	int i;

	_enter("{%d},%x", conn->debug_id, conn->abort_code);

	spin_lock(&conn->bundle->channel_lock);

	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		call = rcu_dereference_protected(
			conn->channels[i].call,
			lockdep_is_held(&conn->bundle->channel_lock));
		if (call) {
			if (compl == RXRPC_CALL_LOCALLY_ABORTED)
				trace_rxrpc_abort(call->debug_id,
						  "CON", call->cid,
						  call->call_id, 0,
						  conn->abort_code,
						  conn->error);
			else
				trace_rxrpc_rx_abort(call, serial,
						     conn->abort_code);
			rxrpc_set_call_completion(call, compl,
						  conn->abort_code,
						  conn->error);
		}
	}

	spin_unlock(&conn->bundle->channel_lock);
	_leave("");
}

/*
 * generate a connection-level abort
 */
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
				  int error, u32 abort_code)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	u32 serial;
	int ret;

	_enter("%d,,%u,%u", conn->debug_id, error, abort_code);

	/* generate a connection-level abort */
	spin_lock_bh(&conn->state_lock);
	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		spin_unlock_bh(&conn->state_lock);
		_leave(" = 0 [already dead]");
		return 0;
	}

	conn->error = error;
	conn->abort_code = abort_code;
	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	spin_unlock_bh(&conn->state_lock);

	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	whdr.epoch = htonl(conn->proto.epoch);
	whdr.cid = htonl(conn->proto.cid);
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ABORT;
	whdr.flags = conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(conn->service_id);

	word = htonl(conn->abort_code);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &word;
	iov[1].iov_len = sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
	whdr.serial = htonl(serial);
	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_conn_abort);
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

	conn->params.peer->last_tx_at = ktime_get_seconds();

	_leave(" = 0");
	return 0;
}

/*
 * mark a call as being on a now-secured channel
 * - must be called with BH's disabled.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	_enter("%p", call);
	if (call) {
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			rxrpc_notify_socket(call);
		}
		write_unlock_bh(&call->state_lock);
	}
}

/*
 * connection-level Rx packet processor
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code;
	int loop, ret;

	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		_leave(" = -ECONNABORTED [%u]", conn->state);
		return -ECONNABORTED;
	}

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_conn_retransmit_call(conn, skb,
					   sp->hdr.cid & RXRPC_CHANNELMASK);
		return 0;

	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets for now. */
		return 0;

	case RXRPC_PACKET_TYPE_ABORT:
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &wtmp, sizeof(wtmp)) < 0) {
			trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
					      tracepoint_string("bad_abort"));
			return -EPROTO;
		}
		abort_code = ntohl(wtmp);
		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);

		conn->error = -ECONNABORTED;
		conn->abort_code = abort_code;
		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
		return -ECONNABORTED;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		return conn->security->respond_to_challenge(conn, skb,
							    _abort_code);

	case RXRPC_PACKET_TYPE_RESPONSE:
		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(conn);
		if (ret < 0)
			return ret;

		ret = conn->security->prime_packet_security(conn);
		if (ret < 0)
			return ret;

		spin_lock(&conn->bundle->channel_lock);
		spin_lock_bh(&conn->state_lock);

		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVICE;
			spin_unlock_bh(&conn->state_lock);
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(
					rcu_dereference_protected(
						conn->channels[loop].call,
						lockdep_is_held(&conn->bundle->channel_lock)));
		} else {
			spin_unlock_bh(&conn->state_lock);
		}

		spin_unlock(&conn->bundle->channel_lock);
		return 0;

	default:
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_conn_pkt"));
		return -EPROTO;
	}
}

/*
 * set up security and issue a challenge
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	u32 abort_code;
	int ret;

	_enter("{%d}", conn->debug_id);

	ASSERT(conn->security_ix != 0);
	ASSERT(conn->server_key);

	if (conn->security->issue_challenge(conn) < 0) {
		abort_code = RX_CALL_DEAD;
		ret = -ENOMEM;
		goto abort;
	}

	_leave("");
	return;

abort:
	_debug("abort %d, %d", ret, abort_code);
	rxrpc_abort_connection(conn, ret, abort_code);
	_leave(" [aborted]");
}

/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 */
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

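	/* Scan the channels for final ACKs that are due (or transmit them all
	 * if @force), tracking the earliest not-yet-due deadline in next_j so
	 * that the connection timer can be set to fire then.
	 */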
again:
	next_j = j + LONG_MAX;
	set = false;
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		smp_rmb(); /* vs rxrpc_disconnect_client_call */
		ack_at = READ_ONCE(chan->final_ack_at);

		if (time_before(j, ack_at) && !force) {
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;
	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}

/*
 * connection-level event processor
 */
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
		rxrpc_secure_connection(conn);

	/* Process delayed ACKs whose time has come. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, false);

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		ret = rxrpc_process_event(conn, skb, &abort_code);
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -ENOMEM:
		case -EAGAIN:
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			break;
		}
	}

	return;

requeue_and_leave:
	skb_queue_head(&conn->rx_queue, skb);
	return;

protocol_error:
	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	return;
}

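/*
 * Connection event processor work function - dispatches to
 * rxrpc_do_process_connection() if the local endpoint is still in use, then
 * drops a reference on the connection.
 */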
void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);

	rxrpc_see_connection(conn);

	if (__rxrpc_use_local(conn->params.local)) {
		rxrpc_do_process_connection(conn);
		rxrpc_unuse_local(conn->params.local);
	}

	rxrpc_put_connection(conn);
	_leave("");
	return;
}