nf_conntrack_proto_gre.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Connection tracking protocol helper module for GRE.
 *
 * GRE is a generic encapsulation protocol, which is generally not very
 * well suited for NAT, as it has no protocol-specific part such as port
 * numbers.
 *
 * It has an optional key field, which may help us distinguish two
 * connections between the same two hosts.
 *
 * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784.
 *
 * PPTP is built on top of a modified version of GRE, and has a mandatory
 * field called "CallID", which serves us for the same purpose as the key
 * field in plain GRE.
 *
 * Documentation about PPTP can be found in RFC 2637.
 *
 * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 *
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 */
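
/*
 * For reference (a rough sketch based on RFC 2637, cited above): the
 * PPTP-flavoured "enhanced" GRE header that this module cares about
 * looks approximately like this; conntrack only needs the flags/version
 * word, the protocol type (0x880B, PPP) and the Call ID:
 *
 *   |C|R|K|S|s|Recur|A| Flags | Ver |   Protocol Type (0x880B)  |
 *   |       Payload Length        |          Call ID            |
 *   |      Sequence Number (optional, present if S is set)      |
 *   |   Acknowledgment Number (optional, present if A is set)   |
 *
 * The kernel's struct pptp_gre_header mirrors this layout.
 */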
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>

static const unsigned int gre_timeouts[GRE_CT_MAX] = {
        [GRE_CT_UNREPLIED]      = 30*HZ,
        [GRE_CT_REPLIED]        = 180*HZ,
};

/* Used when an expectation is added; protects keymap_list against
 * concurrent writers (lookups walk the list under RCU).
 */
static DEFINE_SPINLOCK(keymap_lock);

static inline struct nf_gre_net *gre_pernet(struct net *net)
{
        return &net->ct.nf_ct_proto.gre;
}

void nf_ct_gre_keymap_flush(struct net *net)
{
        struct nf_gre_net *net_gre = gre_pernet(net);
        struct nf_ct_gre_keymap *km, *tmp;

        spin_lock_bh(&keymap_lock);
        list_for_each_entry_safe(km, tmp, &net_gre->keymap_list, list) {
                list_del_rcu(&km->list);
                kfree_rcu(km, rcu);
        }
        spin_unlock_bh(&keymap_lock);
}

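/* Match a keymap entry against a tuple.  The source key itself is not
 * compared: it is precisely the value gre_keymap_lookup() is trying to
 * recover for the tuple.
 */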
static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
                                const struct nf_conntrack_tuple *t)
{
        return km->tuple.src.l3num == t->src.l3num &&
               !memcmp(&km->tuple.src.u3, &t->src.u3, sizeof(t->src.u3)) &&
               !memcmp(&km->tuple.dst.u3, &t->dst.u3, sizeof(t->dst.u3)) &&
               km->tuple.dst.protonum == t->dst.protonum &&
               km->tuple.dst.u.all == t->dst.u.all;
}

/* look up the source key for a given tuple */
static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
{
        struct nf_gre_net *net_gre = gre_pernet(net);
        struct nf_ct_gre_keymap *km;
        __be16 key = 0;

        list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
                if (gre_key_cmpfn(km, t)) {
                        key = km->tuple.src.u.gre.key;
                        break;
                }
        }

        pr_debug("lookup src key 0x%x for ", key);
        nf_ct_dump_tuple(t);

        return key;
}

/* add a single keymap entry, associate with specified master ct */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
                         struct nf_conntrack_tuple *t)
{
        struct net *net = nf_ct_net(ct);
        struct nf_gre_net *net_gre = gre_pernet(net);
        struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
        struct nf_ct_gre_keymap **kmp, *km;

        kmp = &ct_pptp_info->keymap[dir];
        if (*kmp) {
                /* check whether it's a retransmission */
                list_for_each_entry_rcu(km, &net_gre->keymap_list, list) {
                        if (gre_key_cmpfn(km, t) && km == *kmp)
                                return 0;
                }
                pr_debug("trying to override keymap_%s for ct %p\n",
                         dir == IP_CT_DIR_REPLY ? "reply" : "orig", ct);
                return -EEXIST;
        }

        km = kmalloc(sizeof(*km), GFP_ATOMIC);
        if (!km)
                return -ENOMEM;
        memcpy(&km->tuple, t, sizeof(*t));
        *kmp = km;

        pr_debug("adding new entry %p: ", km);
        nf_ct_dump_tuple(&km->tuple);

        spin_lock_bh(&keymap_lock);
        list_add_tail(&km->list, &net_gre->keymap_list);
        spin_unlock_bh(&keymap_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);

/* destroy the keymap entries associated with specified master ct */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
{
        struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
        enum ip_conntrack_dir dir;

        pr_debug("entering for ct %p\n", ct);

        spin_lock_bh(&keymap_lock);
        for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
                if (ct_pptp_info->keymap[dir]) {
                        pr_debug("removing %p from list\n",
                                 ct_pptp_info->keymap[dir]);
                        list_del_rcu(&ct_pptp_info->keymap[dir]->list);
                        kfree_rcu(ct_pptp_info->keymap[dir], rcu);
                        ct_pptp_info->keymap[dir] = NULL;
                }
        }
        spin_unlock_bh(&keymap_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_destroy);

/* PUBLIC CONNTRACK PROTO HELPER FUNCTIONS */

/* gre hdr info to tuple */
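/* The header is parsed in two steps: first the base GRE header, to check
 * that this is version 1 (PPTP-style) GRE carrying PPP, then enough of the
 * PPTP GRE header to reach the call_id field, which becomes the destination
 * "key" of the tuple.  The matching source key is recovered via the keymap
 * set up by the PPTP helper.
 */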
bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
                      struct net *net, struct nf_conntrack_tuple *tuple)
{
        const struct pptp_gre_header *pgrehdr;
        struct pptp_gre_header _pgrehdr;
        __be16 srckey;
        const struct gre_base_hdr *grehdr;
        struct gre_base_hdr _grehdr;

        /* first only delinearize old RFC1701 GRE header */
        grehdr = skb_header_pointer(skb, dataoff, sizeof(_grehdr), &_grehdr);
        if (!grehdr || (grehdr->flags & GRE_VERSION) != GRE_VERSION_1) {
                /* try to behave like "nf_conntrack_proto_generic" */
                tuple->src.u.all = 0;
                tuple->dst.u.all = 0;
                return true;
        }

        /* PPTP header is variable length, only need up to the call_id field */
        pgrehdr = skb_header_pointer(skb, dataoff, 8, &_pgrehdr);
        if (!pgrehdr)
                return true;

        if (grehdr->protocol != GRE_PROTO_PPP) {
                pr_debug("Unsupported GRE proto(0x%x)\n", ntohs(grehdr->protocol));
                return false;
        }

        tuple->dst.u.gre.key = pgrehdr->call_id;
        srckey = gre_keymap_lookup(net, tuple);
        tuple->src.u.gre.key = srckey;

        return true;
}

#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* print private data for conntrack */
static void gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
        seq_printf(s, "timeout=%u, stream_timeout=%u ",
                   (ct->proto.gre.timeout / HZ),
                   (ct->proto.gre.stream_timeout / HZ));
}
#endif

static unsigned int *gre_get_timeouts(struct net *net)
{
        return gre_pernet(net)->timeouts;
}

/* Returns verdict for packet, and may modify conntrack */
int nf_conntrack_gre_packet(struct nf_conn *ct,
                            struct sk_buff *skb,
                            unsigned int dataoff,
                            enum ip_conntrack_info ctinfo,
                            const struct nf_hook_state *state)
{
        if (!nf_ct_is_confirmed(ct)) {
                unsigned int *timeouts = nf_ct_timeout_lookup(ct);

                if (!timeouts)
                        timeouts = gre_get_timeouts(nf_ct_net(ct));

                /* initialize to sane values.  Ideally a conntrack helper
                 * (e.g. in case of pptp) will increase them */
                ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
                ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
        }

        /* If we've seen traffic both ways, this is a GRE connection.
         * Extend timeout. */
        if (ct->status & IPS_SEEN_REPLY) {
                nf_ct_refresh_acct(ct, ctinfo, skb,
                                   ct->proto.gre.stream_timeout);

                /* Also, more likely to be important, and not a probe. */
                if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
                        nf_conntrack_event_cache(IPCT_ASSURED, ct);
        } else
                nf_ct_refresh_acct(ct, ctinfo, skb,
                                   ct->proto.gre.timeout);

        return NF_ACCEPT;
}

#ifdef CONFIG_NF_CONNTRACK_TIMEOUT

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

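/* Translate nfnetlink cttimeout attributes into the timeout array.
 * Userspace supplies the values in seconds; they are stored as jiffies.
 * Attributes that are absent fall back to the current per-net defaults.
 */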
static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
                                     struct net *net, void *data)
{
        unsigned int *timeouts = data;
        struct nf_gre_net *net_gre = gre_pernet(net);

        if (!timeouts)
                timeouts = gre_get_timeouts(net);
        /* set default timeouts for GRE. */
        timeouts[GRE_CT_UNREPLIED] = net_gre->timeouts[GRE_CT_UNREPLIED];
        timeouts[GRE_CT_REPLIED] = net_gre->timeouts[GRE_CT_REPLIED];

        if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
                timeouts[GRE_CT_UNREPLIED] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
        }
        if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
                timeouts[GRE_CT_REPLIED] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
        }
        return 0;
}

static int
gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
        const unsigned int *timeouts = data;

        if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
                         htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
            nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
                         htonl(timeouts[GRE_CT_REPLIED] / HZ)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -ENOSPC;
}

static const struct nla_policy
gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
        [CTA_TIMEOUT_GRE_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_GRE_REPLIED]       = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */

void nf_conntrack_gre_init_net(struct net *net)
{
        struct nf_gre_net *net_gre = gre_pernet(net);
        int i;

        INIT_LIST_HEAD(&net_gre->keymap_list);
        for (i = 0; i < GRE_CT_MAX; i++)
                net_gre->timeouts[i] = gre_timeouts[i];
}

/* protocol helper struct */
const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre = {
        .l4proto         = IPPROTO_GRE,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        .print_conntrack = gre_print_conntrack,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
        .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
        .nla_policy      = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout    = {
                .nlattr_to_obj  = gre_timeout_nlattr_to_obj,
                .obj_to_nlattr  = gre_timeout_obj_to_nlattr,
                .nlattr_max     = CTA_TIMEOUT_GRE_MAX,
                .obj_size       = sizeof(unsigned int) * GRE_CT_MAX,
                .nla_policy     = gre_timeout_nla_policy,
        },
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};