nf_nat_core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>

#include "nf_internals.h"
static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;

struct nf_nat_lookup_hook_priv {
	struct nf_hook_entries __rcu *entries;

	struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
	struct nf_hook_ops *nat_hook_ops;
	unsigned int users;
};

struct nat_net {
	struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};
#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi6 *fl6 = &fl->u.ip6;

	if (ct->status & statusbit) {
		fl6->daddr = t->dst.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_dport = t->dst.u.all;
	}

	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl6->saddr = t->src.u3.in6;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl6->fl6_sport = t->src.u.all;
	}
#endif
}
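
/* Note on the "statusbit ^= IPS_NAT_MASK" above: IPS_NAT_MASK is
 * IPS_SRC_NAT | IPS_DST_NAT, so the XOR flips IPS_DST_NAT into IPS_SRC_NAT
 * and vice versa.  Each helper therefore rewrites the destination side of
 * the flow key when the bit it was handed is set, then the source side for
 * the complementary bit.
 */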
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	switch (family) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_decode_session(skb, ct, dir, statusbit, fl);
		return;
	case NFPROTO_IPV6:
		nf_nat_ipv6_decode_session(skb, ct, dir, statusbit, fl);
		return;
	}
}
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	struct sock *sk = skb->sk;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	if (!dst_hold_safe(dst))
		return -EHOSTUNREACH;

	if (sk && !net_eq(net, sock_net(sk)))
		sk = NULL;

	dst = xfrm_lookup(net, dst, &fl, sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

	return reciprocal_scale(hash, nf_nat_htable_size);
}
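
/* Note: reciprocal_scale(hash, N) maps a full 32-bit hash into the bucket
 * range [0, N) as ((u64)hash * N) >> 32, which avoids a modulo on this path.
 * The per-boot nf_nat_hash_rnd seed keeps bucket placement unpredictable to
 * remote peers.
 */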
/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuple(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}

static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}
/* Is the manipulable part of the tuple between min and max incl? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_GRE: /* all fall through */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_DCCP:
	case IPPROTO_SCTP:
		if (maniptype == NF_NAT_MANIP_SRC)
			port = tuple->src.u.all;
		else
			port = tuple->dst.u.all;

		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;
	}
}
/* If we source-map this tuple so that the reply looks like reply_tuple,
 * will that meet the constraints of range?
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !nf_nat_inet_in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		return 1;

	return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
				&range->min_proto, &range->max_proto);
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuple(result,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range))
				return 1;
		}
	}
	return 0;
}
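
/* Note: reusing an existing mapping here means that new connections from the
 * same original source address/port/protocol get the same translated source,
 * regardless of destination, as long as that mapping still satisfies the
 * requested range.
 */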
/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
		   0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
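
/* Illustrative example (not from this file): with an IPv4 SNAT range of
 * 203.0.113.1-203.0.113.4, max is 0 and dist is 4, so the loop above runs
 * once and picks htonl(minip + reciprocal_scale(j, 4)), i.e. one of the four
 * addresses, keyed by a hash of the source address (and, unless
 * NF_NAT_RANGE_PERSISTENT is set, the destination and zone as well).  A given
 * client talking to a given server therefore keeps the same external address
 * across connections; with NF_NAT_RANGE_PERSISTENT, across destinations too.
 */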
/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{
	unsigned int range_size, min, max, i, attempts;
	__be16 *keyptr;
	u16 off;
	static const unsigned int max_attempts = 128;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		/* id is same for either direction... */
		keyptr = &tuple->src.u.icmp.id;
		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 0;
			range_size = 65536;
		} else {
			min = ntohs(range->min_proto.icmp.id);
			range_size = ntohs(range->max_proto.icmp.id) -
				     ntohs(range->min_proto.icmp.id) + 1;
		}
		goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	case IPPROTO_GRE:
		/* If there is no master conntrack we are not PPTP,
		 * do not change tuples
		 */
		if (!ct->master)
			return;

		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.gre.key;
		else
			keyptr = &tuple->dst.u.gre.key;

		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 1;
			range_size = 65535;
		} else {
			min = ntohs(range->min_proto.gre.key);
			range_size = ntohs(range->max_proto.gre.key) - min + 1;
		}
		goto find_free_id;
#endif
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_TCP:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.all;
		else
			keyptr = &tuple->dst.u.all;

		break;
	default:
		return;
	}

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*keyptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*keyptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		max = ntohs(range->max_proto.all);
		if (unlikely(max < min))
			swap(max, min);
		range_size = max - min + 1;
	}

find_free_id:
	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
	else
		off = prandom_u32();

	attempts = range_size;
	if (attempts > max_attempts)
		attempts = max_attempts;

	/* We are in softirq; doing a search of the entire range risks
	 * soft lockup when all tuples are already used.
	 *
	 * If we can't find any free port from first offset, pick a new
	 * one and try again, with ever smaller search window.
	 */
another_round:
	for (i = 0; i < attempts; i++, off++) {
		*keyptr = htons(min + off % range_size);
		if (!nf_nat_used_tuple(tuple, ct))
			return;
	}

	if (attempts >= range_size || attempts < 16)
		return;
	attempts /= 2;
	off = prandom_u32();
	goto another_round;
}
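
/* Illustrative example (not from this file): for an SNAT rule without an
 * explicit port range, a flow with source port 40000 keeps *keyptr in the
 * 1024-65535 window, starts from a random offset and probes at most 128
 * candidate ports per round via nf_nat_used_tuple().  If every probe is
 * taken, the window is halved and re-seeded (128, 64, 32, ...) until it
 * drops below 16, after which a possibly clashing tuple is left for
 * __nf_conntrack_confirm() to catch, as the comment below describes.
 */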
/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __nf_conntrack_confirm and drop the packet.
 */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range2 *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips are not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
			    l4proto_in_range(tuple, maniptype,
					     &range->min_proto,
					     &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				return;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			return;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);

	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply.  Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuple(&curr_tuple,
			   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuple(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
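
/* Illustrative caller sketch (not from this file): an SNAT-style user of
 * this exported API fills an nf_nat_range2 and calls nf_nat_setup_info()
 * from a NAT hook, before the conntrack entry is confirmed, e.g.:
 *
 *	struct nf_nat_range2 range = {
 *		.flags		= NF_NAT_RANGE_MAP_IPS,
 *		.min_addr.ip	= newsrc,	(newsrc is a hypothetical
 *		.max_addr.ip	= newsrc,	 local variable)
 *	};
 *	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 *
 * This mirrors what the masquerade and xt_nat targets do.
 */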
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range2 range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int verdict = NF_ACCEPT;
	unsigned long statusbit;

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit)
		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

	return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	nat = nfct_nat(ct);

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			struct nf_nat_lookup_hook_priv *lpriv = priv;
			struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
			unsigned int ret;
			int i;

			if (!e)
				goto null_bind;

			for (i = 0; i < e->num_hook_entries; i++) {
				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
						       state);
				if (ret != NF_ACCEPT)
					return ret;
				if (nf_nat_initialized(ct, maniptype))
					goto do_nat;
			}
null_bind:
			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct, ct->status);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;
	default:
		/* ESTABLISHED */
		WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
			ctinfo != IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}

do_nat:
	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);
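
/* Note: nf_nat_inet_fn() is the common entry point shared by the IPv4 and
 * IPv6 NAT hooks.  For a NEW (or RELATED) conntrack it walks the per-hook
 * list of NAT lookup functions attached via nf_nat_register_fn() below
 * (iptables/nftables NAT chain evaluation); if none of them sets up a
 * mapping, a null binding is allocated so the connection still gets a unique
 * tuple reserved for it.  Later packets of the flow only go through
 * nf_nat_packet().
 */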
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}

static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	/* This module is being removed and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
		__nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}

/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested_deprecated(tb, CTA_PROTONAT_MAX, attr,
					  protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};

static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V4_MINIP]) {
		range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V4_MAXIP])
		range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
	else
		range->max_addr.ip = range->min_addr.ip;

	return 0;
}

static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{
	if (tb[CTA_NAT_V6_MINIP]) {
		nla_memcpy(&range->min_addr.ip6, tb[CTA_NAT_V6_MINIP],
			   sizeof(struct in6_addr));
		range->flags |= NF_NAT_RANGE_MAP_IPS;
	}

	if (tb[CTA_NAT_V6_MAXIP])
		nla_memcpy(&range->max_addr.ip6, tb[CTA_NAT_V6_MAXIP],
			   sizeof(struct in6_addr));
	else
		range->max_addr = range->min_addr;

	return 0;
}

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested_deprecated(tb, CTA_NAT_MAX, nat,
					  nat_nla_policy, NULL);
	if (err < 0)
		return err;

	switch (nf_ct_l3num(ct)) {
	case NFPROTO_IPV4:
		err = nf_nat_ipv4_nlattr_to_range(tb, range);
		break;
	case NFPROTO_IPV6:
		err = nf_nat_ipv6_nlattr_to_range(tb, range);
		break;
	default:
		err = -EPROTONOSUPPORT;
		break;
	}

	if (err)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif
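
/* Illustrative attribute layout handled above (not from this file): a
 * ctnetlink new-conntrack request may nest
 *
 *	CTA_NAT_SRC (or CTA_NAT_DST)
 *	  CTA_NAT_V4_MINIP / CTA_NAT_V4_MAXIP (or the V6 variants)
 *	  CTA_NAT_PROTO
 *	    CTA_PROTONAT_PORT_MIN / CTA_PROTONAT_PORT_MAX
 *
 * which nfnetlink_parse_nat() flattens into a struct nf_nat_range2 before
 * handing it to nf_nat_setup_info().
 */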
static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};

int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	for (i = 0; i < ops_count; i++) {
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
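
/* Note on the registration model: the first nf_nat_register_fn() call for a
 * family registers one set of "real" netfilter hooks (a copy of
 * orig_nat_ops); the NAT chains registered later are not netfilter hooks
 * themselves but entries on the per-hook priv->entries list that
 * nf_nat_inet_fn() walks.  The users counter tracks how many such lookup
 * functions are attached, so the real hooks can be torn down when the last
 * one goes away (see nf_nat_unregister_fn() below).
 */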
void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}
static struct pernet_operations nat_net_ops = {
	.id = &nat_net_id,
	.size = sizeof(struct nat_net),
};

static struct nf_nat_hook nat_hook = {
	.parse_nat_setup	= nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session		= __nf_nat_decode_session,
#endif
	.manip_pkt		= nf_nat_manip_pkt,
};

static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		kvfree(nf_nat_bysource);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		nf_ct_extend_unregister(&nat_extend);
		kvfree(nf_nat_bysource);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	return 0;
}
static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);