nf_nat_core.c

/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(format, args...)
#endif

static DEFINE_RWLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto = NULL;

/* Calculated at init based on memory size */
static unsigned int nf_nat_htable_size;

static struct list_head *bysource;

#define MAX_IP_NAT_PROTO 256
static struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO];

static inline struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}
struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct nf_conntrack_tuple *tuple)
{
	/* Original src, to ensure we map it consistently if poss. */
	return jhash_3words((__force u32)tuple->src.u3.ip, tuple->src.u.all,
			    tuple->dst.protonum, 0) % nf_nat_htable_size;
}
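
/*
 * Editorial note (not part of the original source): hashing only the
 * original source address, source port and protocol means that every
 * connection from the same source falls into the same bysource bucket,
 * which is what allows find_appropriate_src() below to reuse an
 * existing mapping for new connections from that source.
 */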

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *conn)
{
	struct nf_conn_nat *nat;

	if (!(conn->status & IPS_NAT_DONE_MASK))
		return;

	nat = nfct_nat(conn);
	write_lock_bh(&nf_nat_lock);
	list_del(&nat->info.bysource);
	write_unlock_bh(&nf_nat_lock);
}
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
/* If we source-map this tuple so that the reply looks like reply_tuple,
 * will that meet the constraints of range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}
/* Only called for SRC manip */
static int
find_appropriate_src(const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(tuple);
	struct nf_conn_nat *nat;
	struct nf_conn *ct;

	read_lock_bh(&nf_nat_lock);
	list_for_each_entry(nat, &bysource[h], info.bysource) {
		/* The NAT area lives at the start of the conntrack's data[];
		   step back to recover the enclosing nf_conn. */
		ct = (struct nf_conn *)((char *)nat - offsetof(struct nf_conn, data));
		if (same_src(ct, tuple)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				read_unlock_bh(&nf_nat_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&nf_nat_lock);
	return 0;
}
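
/*
 * Editorial illustration (not part of the original source): if
 * 10.0.0.1:4000 was already SNATed to 1.2.3.4:4000 for one connection,
 * a later connection from 10.0.0.1:4000 to a different destination
 * hashes to the same bysource bucket, so the lookup above proposes
 * 1.2.3.4:4000 again and get_unique_tuple() keeps it as long as the
 * resulting tuple is still unused and inside the requested range.
 */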
/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 (__force u32)tuple->dst.u3.ip, 0);
	*var_ipp = htonl(minip + j % (maxip - minip + 1));
}
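
/*
 * Editorial illustration (not part of the original source): for a range
 * of 1.2.3.4-1.2.3.7, maxip - minip + 1 == 4, so the jhash of the
 * original source and destination addresses modulo 4 picks one of the
 * four addresses.  The same client/server pair therefore always maps to
 * the same address, which is the consistency the comment above refers to.
 */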
/* Manipulate the tuple into the range given.  For NF_IP_POST_ROUTING,
 * we change the source to map into the range.  For NF_IP_PRE_ROUTING
 * and NF_IP_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct nf_nat_protocol *proto;

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC) {
		if (find_appropriate_src(orig_tuple, tuple, range)) {
			DEBUGP("get_unique_tuple: Found current src map\n");
			if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
				if (!nf_nat_used_tuple(tuple, ct))
					return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: get the protocol to try to obtain a unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  unsigned int hooknum)
{
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_info *info = &nat->info;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	NF_CT_ASSERT(hooknum == NF_IP_PRE_ROUTING ||
		     hooknum == NF_IP_POST_ROUTING ||
		     hooknum == NF_IP_LOCAL_IN ||
		     hooknum == NF_IP_LOCAL_OUT);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp = conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		write_lock_bh(&nf_nat_lock);
		list_add(&info->bysource, &bysource[srchash]);
		write_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
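
/*
 * Editorial note (not part of the original source): nf_nat_setup_info()
 * is meant to be called at most once per manip type per connection,
 * typically by a NAT target (e.g. SNAT/DNAT/MASQUERADE) from the hook
 * named by hooknum; the BUG_ON above enforces that a manip type is not
 * set up twice.
 */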
/* Returns true if succeeded. */
static int
manip_pkt(u_int16_t proto,
	  struct sk_buff **pskb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	struct nf_nat_protocol *p;

	if (!skb_make_writable(pskb, iphdroff + sizeof(*iph)))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(pskb, iphdroff, target, maniptype))
		return 0;

	iph = (void *)(*pskb)->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		nf_csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		nf_csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return 1;
}
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff **pskb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
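
/*
 * Editorial illustration (not part of the original source): for an
 * SNAT-only connection ct->status has IPS_SRC_NAT set.  An
 * ORIGINAL-direction packet seen at POSTROUTING has mtype ==
 * IP_NAT_MANIP_SRC, matches IPS_SRC_NAT, and gets its source rewritten.
 * The reply seen at PREROUTING starts with statusbit == IPS_DST_NAT,
 * which the XOR with IPS_NAT_MASK flips back to IPS_SRC_NAT, so the
 * reply's destination is rewritten back to the client's original
 * address.
 */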
/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff **pskb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = (*pskb)->nh.iph->ihl * 4;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(pskb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(*pskb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT((*pskb)->nfctinfo == IP_CT_RELATED ||
		     (*pskb)->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	DEBUGP("icmp_reply_translation: translating error %p manip %u dir %s\n",
	       *pskb, manip, dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(*pskb,
			     (*pskb)->nh.iph->ihl*4 + sizeof(struct icmphdr),
			     (*pskb)->nh.iph->ihl*4 +
			     sizeof(struct icmphdr) + inside->ip.ihl*4,
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, pskb,
		       (*pskb)->nh.iph->ihl*4 + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside": manip_pkt may have reallocated the
		   skb data via skb_make_writable. */
		inside = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(*pskb, hdrlen,
					       (*pskb)->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, pskb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
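
/*
 * Editorial illustration (not part of the original source): suppose a
 * client's connection to firewall:80 was DNATed to server:8080.  If the
 * server emits an ICMP error, its payload carries the packet headers as
 * the server saw them (dst server:8080).  The inner translation above
 * (done with !manip) rewrites that embedded header back to what the
 * client originally sent, and the outer translation then makes the
 * error itself appear to come from the address the client was actually
 * talking to.
 */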
/* Protocol registration. */
int nf_nat_protocol_register(struct nf_nat_protocol *proto)
{
	int ret = 0;

	write_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	write_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(struct nf_nat_protocol *proto)
{
	write_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	write_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
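
/*
 * Editorial note (not part of the original source): the
 * synchronize_rcu() above waits until every reader that may still hold
 * the old protocol pointer (obtained under rcu_read_lock() via
 * __nf_nat_proto_find()) has finished, so the caller can safely free or
 * unload the protocol afterwards.
 */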
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
int
nf_nat_port_range_to_nfattr(struct sk_buff *skb,
			    const struct nf_nat_range *range)
{
	NFA_PUT(skb, CTA_PROTONAT_PORT_MIN, sizeof(__be16),
		&range->min.tcp.port);
	NFA_PUT(skb, CTA_PROTONAT_PORT_MAX, sizeof(__be16),
		&range->max.tcp.port);

	return 0;

nfattr_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_nat_port_range_to_nfattr);

int
nf_nat_port_nfattr_to_range(struct nfattr *tb[], struct nf_nat_range *range)
{
	int ret = 0;

	/* we have to return whether we actually parsed something or not */
	if (tb[CTA_PROTONAT_PORT_MIN-1]) {
		ret = 1;
		range->min.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MIN-1]);
	}

	if (!tb[CTA_PROTONAT_PORT_MAX-1]) {
		if (ret)
			range->max.tcp.port = range->min.tcp.port;
	} else {
		ret = 1;
		range->max.tcp.port =
			*(__be16 *)NFA_DATA(tb[CTA_PROTONAT_PORT_MAX-1]);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_port_nfattr_to_range);
#endif
static int __init nf_nat_init(void)
{
	size_t i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;

	/* vmalloc for the bysource hash table */
	bysource = vmalloc(sizeof(struct list_head) * nf_nat_htable_size);
	if (!bysource)
		return -ENOMEM;

	/* Sew in builtin protocols. */
	write_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	write_unlock_bh(&nf_nat_lock);

	for (i = 0; i < nf_nat_htable_size; i++) {
		INIT_LIST_HEAD(&bysource[i]);
	}

	/* FIXME: Man, this is a hack.  <SIGH> */
	NF_CT_ASSERT(rcu_dereference(nf_conntrack_destroyed) == NULL);
	rcu_assign_pointer(nf_conntrack_destroyed, nf_nat_cleanup_conntrack);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
	return 0;
}
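
/*
 * Editorial note (not part of the original source): hooking
 * nf_nat_cleanup_conntrack() into nf_conntrack_destroyed is what
 * unlinks a conntrack from the bysource list when it is destroyed, and
 * marking nf_conntrack_untracked with IPS_NAT_DONE_MASK makes NAT treat
 * the fake "untracked" entry as already set up and skip it.
 */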
/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __exit nf_nat_cleanup(void)
{
	nf_ct_iterate_cleanup(&clean_nat, NULL);
	rcu_assign_pointer(nf_conntrack_destroyed, NULL);
	synchronize_rcu();
	vfree(bysource);
	nf_ct_l3proto_put(l3proto);
}
MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);