nf_flow_table_core.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);
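
/*
 * Copy one direction of the conntrack tuple (addresses, ports and the
 * L3/L4 protocol numbers) into the flow tuple for that direction.
 */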
static void
flow_offload_fill_dir(struct flow_offload *flow,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;
}
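
/*
 * Allocate a flow entry for @ct: grab a reference on the conntrack
 * entry, fill both tuple directions and note whether SNAT/DNAT
 * mangling will be required.
 */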
struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
{
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
	if (!flow)
		goto err_ct_refcnt;

	flow->ct = ct;

	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		__set_bit(NF_FLOW_SNAT, &flow->flags);
	if (ct->status & IPS_DST_NAT)
		__set_bit(NF_FLOW_DNAT, &flow->flags);

	return flow;

err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
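
/*
 * Cache the route for one direction: hold a reference on the dst
 * entry, record the path MTU and take the input interface from the
 * opposite direction's route.
 */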
static int flow_offload_fill_route(struct flow_offload *flow,
				   const struct nf_flow_route *route,
				   enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
	struct dst_entry *other_dst = route->tuple[!dir].dst;
	struct dst_entry *dst = route->tuple[dir].dst;

	if (!dst_hold_safe(route->tuple[dir].dst))
		return -1;

	switch (flow_tuple->l3proto) {
	case NFPROTO_IPV4:
		flow_tuple->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		flow_tuple->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	flow_tuple->iifidx = other_dst->dev->ifindex;
	flow_tuple->dst_cache = dst;

	return 0;
}

int flow_offload_route_init(struct flow_offload *flow,
			    const struct nf_flow_route *route)
{
	int err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	if (err < 0)
		return err;

	err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY);
	if (err < 0)
		goto err_route_reply;

	flow->type = NF_FLOW_OFFLOAD_ROUTE;

	return 0;

err_route_reply:
	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);

	return err;
}
EXPORT_SYMBOL_GPL(flow_offload_route_init);
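
/*
 * Once a flow leaves the offload path, conntrack takes over again:
 * relax TCP window tracking and cap the conntrack timeout to the
 * pickup value so the connection state can be resynchronized.
 */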
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	int l4num = nf_ct_protonum(ct);
	unsigned int timeout;

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
		WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}

static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	if (nf_ct_protonum(ct) == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);
}

static void flow_offload_fixup_ct(struct nf_conn *ct)
{
	flow_offload_fixup_ct_state(ct);
	flow_offload_fixup_ct_timeout(ct);
}

static void flow_offload_route_release(struct flow_offload *flow)
{
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
}

void flow_offload_free(struct flow_offload *flow)
{
	switch (flow->type) {
	case NF_FLOW_OFFLOAD_ROUTE:
		flow_offload_route_release(flow);
		break;
	default:
		break;
	}
	nf_ct_put(flow->ct);
	kfree_rcu(flow, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
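
/*
 * Flows are keyed on the leading fields of struct flow_offload_tuple:
 * everything up to (but not including) 'dir' is hashed and compared,
 * so both directions of a flow can be stored in the same rhashtable.
 */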
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};
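
/*
 * Insert both directions of the flow into the flowtable and, if the
 * flowtable supports hardware offload, queue the flow for the offload
 * worker as well.
 */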
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	nf_ct_offload_timeout(flow->ct);

	if (nf_flowtable_hw_offload(flow_table)) {
		__set_bit(NF_FLOW_HW, &flow->flags);
		nf_flow_offload_add(flow_table, flow);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow)
{
	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;

	if (likely(!nf_flowtable_hw_offload(flow_table)))
		return;

	nf_flow_offload_add(flow_table, flow);
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);

static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return nf_flow_timeout_delta(flow->timeout) <= 0;
}

static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);

	if (nf_flow_has_expired(flow))
		flow_offload_fixup_ct(flow->ct);
	else
		flow_offload_fixup_ct_timeout(flow->ct);

	flow_offload_free(flow);
}

void flow_offload_teardown(struct flow_offload *flow)
{
	set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	flow_offload_fixup_ct_state(flow->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
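
/*
 * Look up a packet tuple in the flowtable. Flows under teardown or
 * whose conntrack entry is dying are treated as lookup misses so that
 * packets fall back to the regular forwarding path.
 */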
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
		return NULL;

	if (unlikely(nf_ct_is_dying(flow->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
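
/*
 * Walk the flowtable and call @iter once per flow. Only the ORIGINAL
 * direction entry is visited so every flow is seen exactly once;
 * -EAGAIN from the walker (concurrent resize) is ignored and the walk
 * continues.
 */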
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}

		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}
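
/*
 * Per-flow garbage collection step: expired or dying flows are marked
 * for teardown. Hardware-offloaded flows are first removed from the
 * hardware and only freed once the driver reports NF_FLOW_HW_DEAD;
 * live hardware flows get their stats refreshed instead.
 */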
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;

	if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
		set_bit(NF_FLOW_TEARDOWN, &flow->flags);

	if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
		if (test_bit(NF_FLOW_HW, &flow->flags)) {
			if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
				nf_flow_offload_del(flow_table, flow);
			else if (test_bit(NF_FLOW_HW_DEAD, &flow->flags))
				flow_offload_del(flow_table, flow);
		} else {
			flow_offload_del(flow_table, flow);
		}
	} else if (test_bit(NF_FLOW_HW, &flow->flags)) {
		nf_flow_offload_stats(flow_table, flow);
	}
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
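
/*
 * Layer 4 port NAT helpers: these only fix up the TCP/UDP checksum,
 * the port fields themselves are rewritten by nf_flow_snat_port() and
 * nf_flow_dnat_port() below.
 */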
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);

	return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct udphdr *udph;

	if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, false);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			    u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
	flow_block_init(&flowtable->flow_block);
	init_rwsem(&flowtable->flow_block_lock);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);
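
/*
 * Tear down either all flows (@data == NULL, used on flowtable
 * removal) or only the flows whose input interface matches the given
 * net device.
 */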
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}

	if (net_eq(nf_ct_net(flow->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_teardown(flow);
}

void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
	nf_flow_table_offload_flush(flowtable);
}

void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_gc_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);

	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	nf_flow_table_offload_flush(flow_table);
	if (nf_flowtable_hw_offload(flow_table))
		nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step,
				      flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

static int __init nf_flow_table_module_init(void)
{
	return nf_flow_table_offload_init();
}

static void __exit nf_flow_table_module_exit(void)
{
	nf_flow_table_offload_exit();
}

module_init(nf_flow_table_module_init);
module_exit(nf_flow_table_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_DESCRIPTION("Netfilter flow table module");