conn_service.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"
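
/* Preallocated service connections take a ref on this placeholder bundle so
 * that conn->bundle is never NULL; a service connection has no real client
 * bundle to belong to.
 */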
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
	.usage		= ATOMIC_INIT(1),
	.debug_id	= UINT_MAX,
	.channel_lock	= __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
};

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;
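
	/* The epoch and cid fields overlay index_key in struct
	 * rxrpc_conn_proto (a union in ar-internal.h), so the tree walk
	 * below can compare both with a single 64-bit key.  RXRPC_CIDMASK
	 * clears the channel-number bits from the cid.
	 */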
	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));
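
	/* done_seqretry() drops the seqlock if the final pass was performed
	 * with the lock held rather than locklessly.
	 */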
	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock_bh(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
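
	/* Walk the tree to find the insertion point, keyed on epoch+cid; an
	 * existing connection with the same key is a collision.
	 */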
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}
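
	/* rb_link_node_rcu() publishes the node with rcu_assign_pointer(),
	 * so concurrent RCU lookups see either the old tree or the new node,
	 * never a half-linked one.
	 */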
	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);

conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							   gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		atomic_set(&conn->usage, 2);
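
		/* Take a ref on the placeholder bundle so that conn->bundle
		 * is always valid for a service connection.
		 */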
		conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);

		atomic_inc(&rxnet->nr_conns);
		write_lock(&rxnet->conn_lock);
		list_add_tail(&conn->link, &rxnet->service_conns);
		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
		write_unlock(&rxnet->conn_lock);

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage),
				 __builtin_return_address(0));
	}

	return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
				   struct rxrpc_connection *conn,
				   const struct rxrpc_security *sec,
				   struct key *key,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");
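
	/* Adopt the connection identity (epoch, cid, service) from the
	 * header of the first packet received on it.
	 */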
	conn->proto.epoch	= sp->hdr.epoch;
	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	conn->params.service_id	= sp->hdr.serviceId;
	conn->service_id	= sp->hdr.serviceId;
	conn->security_ix	= sp->hdr.securityIndex;
	conn->out_clientflag	= 0;
	conn->security		= sec;
	conn->server_key	= key_get(key);
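
	/* A non-zero security index means the connection must complete a
	 * security negotiation (e.g. the rxkad challenge/response) before
	 * user data can flow.
	 */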
	if (conn->security_ix)
		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state	= RXRPC_CONN_SERVICE;

	/* See if we should upgrade the service.  This can only happen on the
	 * first packet on a new connection.  Once done, it applies to all
	 * subsequent calls on that connection.
	 */
	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
	    conn->service_id == rx->service_upgrade.from)
		conn->service_id = rx->service_upgrade.to;

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->params.peer, conn);

	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as
 * a target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
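
	/* Only erase if the connection was actually published; a
	 * preallocated connection that was never used won't be in the tree.
	 */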
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
}