nf_conntrack_core.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Connection state tracking for netfilter. This is separated from,
  3. but required by, the NAT layer; it can also be used by an iptables
  4. extension. */
  5. /* (C) 1999-2001 Paul `Rusty' Russell
  6. * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
  7. * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
  8. * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/types.h>
  12. #include <linux/netfilter.h>
  13. #include <linux/module.h>
  14. #include <linux/sched.h>
  15. #include <linux/skbuff.h>
  16. #include <linux/proc_fs.h>
  17. #include <linux/vmalloc.h>
  18. #include <linux/stddef.h>
  19. #include <linux/slab.h>
  20. #include <linux/random.h>
  21. #include <linux/jhash.h>
  22. #include <linux/siphash.h>
  23. #include <linux/err.h>
  24. #include <linux/percpu.h>
  25. #include <linux/moduleparam.h>
  26. #include <linux/notifier.h>
  27. #include <linux/kernel.h>
  28. #include <linux/netdevice.h>
  29. #include <linux/socket.h>
  30. #include <linux/mm.h>
  31. #include <linux/nsproxy.h>
  32. #include <linux/rculist_nulls.h>
  33. #include <trace/hooks/net.h>
  34. #include <net/netfilter/nf_conntrack.h>
  35. #include <net/netfilter/nf_conntrack_l4proto.h>
  36. #include <net/netfilter/nf_conntrack_expect.h>
  37. #include <net/netfilter/nf_conntrack_helper.h>
  38. #include <net/netfilter/nf_conntrack_seqadj.h>
  39. #include <net/netfilter/nf_conntrack_core.h>
  40. #include <net/netfilter/nf_conntrack_extend.h>
  41. #include <net/netfilter/nf_conntrack_acct.h>
  42. #include <net/netfilter/nf_conntrack_ecache.h>
  43. #include <net/netfilter/nf_conntrack_zones.h>
  44. #include <net/netfilter/nf_conntrack_timestamp.h>
  45. #include <net/netfilter/nf_conntrack_timeout.h>
  46. #include <net/netfilter/nf_conntrack_labels.h>
  47. #include <net/netfilter/nf_conntrack_synproxy.h>
  48. #include <net/netfilter/nf_nat.h>
  49. #include <net/netfilter/nf_nat_helper.h>
  50. #include <net/netns/hash.h>
  51. #include <net/ip.h>
  52. #include "nf_internals.h"
  53. __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
  54. EXPORT_SYMBOL_GPL(nf_conntrack_locks);
  55. __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
  56. EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
  57. struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
  58. EXPORT_SYMBOL_GPL(nf_conntrack_hash);
  59. struct conntrack_gc_work {
  60. struct delayed_work dwork;
  61. u32 next_bucket;
  62. bool exiting;
  63. bool early_drop;
  64. };
  65. static __read_mostly struct kmem_cache *nf_conntrack_cachep;
  66. static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
  67. static __read_mostly bool nf_conntrack_locks_all;
  68. /* serialize hash resizes and nf_ct_iterate_cleanup */
  69. static DEFINE_MUTEX(nf_conntrack_mutex);
  70. #define GC_SCAN_INTERVAL (120u * HZ)
  71. #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
  72. static struct conntrack_gc_work conntrack_gc_work;
  73. void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
  74. {
  75. /* 1) Acquire the lock */
  76. spin_lock(lock);
  77. /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
  78. * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
  79. */
  80. if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
  81. return;
  82. /* fast path failed, unlock */
  83. spin_unlock(lock);
  84. /* Slow path 1) get global lock */
  85. spin_lock(&nf_conntrack_locks_all_lock);
  86. /* Slow path 2) get the lock we want */
  87. spin_lock(lock);
  88. /* Slow path 3) release the global lock */
  89. spin_unlock(&nf_conntrack_locks_all_lock);
  90. }
  91. EXPORT_SYMBOL_GPL(nf_conntrack_lock);
  92. static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
  93. {
  94. h1 %= CONNTRACK_LOCKS;
  95. h2 %= CONNTRACK_LOCKS;
  96. spin_unlock(&nf_conntrack_locks[h1]);
  97. if (h1 != h2)
  98. spin_unlock(&nf_conntrack_locks[h2]);
  99. }
  100. /* return true if we need to recompute hashes (in case hash table was resized) */
  101. static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
  102. unsigned int h2, unsigned int sequence)
  103. {
  104. h1 %= CONNTRACK_LOCKS;
  105. h2 %= CONNTRACK_LOCKS;
  106. if (h1 <= h2) {
  107. nf_conntrack_lock(&nf_conntrack_locks[h1]);
  108. if (h1 != h2)
  109. spin_lock_nested(&nf_conntrack_locks[h2],
  110. SINGLE_DEPTH_NESTING);
  111. } else {
  112. nf_conntrack_lock(&nf_conntrack_locks[h2]);
  113. spin_lock_nested(&nf_conntrack_locks[h1],
  114. SINGLE_DEPTH_NESTING);
  115. }
  116. if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
  117. nf_conntrack_double_unlock(h1, h2);
  118. return true;
  119. }
  120. return false;
  121. }
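   /* nf_conntrack_double_lock() always acquires the lower-numbered bucket
    * lock first (the second one is taken with SINGLE_DEPTH_NESTING for
    * lockdep), so two CPUs locking the same pair of buckets cannot deadlock.
    * It returns true when the nf_conntrack_generation seqcount moved, i.e.
    * the hash table was resized in between, so the caller must recompute
    * both hashes and retry.
    */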
  122. static void nf_conntrack_all_lock(void)
  123. __acquires(&nf_conntrack_locks_all_lock)
  124. {
  125. int i;
  126. spin_lock(&nf_conntrack_locks_all_lock);
  127. nf_conntrack_locks_all = true;
  128. for (i = 0; i < CONNTRACK_LOCKS; i++) {
  129. spin_lock(&nf_conntrack_locks[i]);
  130. /* This spin_unlock provides the "release" to ensure that
  131. * nf_conntrack_locks_all==true is visible to everyone that
  132. * acquired spin_lock(&nf_conntrack_locks[]).
  133. */
  134. spin_unlock(&nf_conntrack_locks[i]);
  135. }
  136. }
  137. static void nf_conntrack_all_unlock(void)
  138. __releases(&nf_conntrack_locks_all_lock)
  139. {
  140. /* All prior stores must be complete before we clear
  141. * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
  142. * might observe the false value but not the entire
  143. * critical section.
  144. * It pairs with the smp_load_acquire() in nf_conntrack_lock()
  145. */
  146. smp_store_release(&nf_conntrack_locks_all, false);
  147. spin_unlock(&nf_conntrack_locks_all_lock);
  148. }
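   /* Summary of the scheme implemented by nf_conntrack_lock() and the two
    * helpers above: normal packet processing only takes one of the
    * CONNTRACK_LOCKS per-bucket spinlocks. Code that needs the whole table
    * quiesced (resize, cleanup) calls nf_conntrack_all_lock(), which sets
    * nf_conntrack_locks_all and then takes and drops every bucket lock so
    * that current holders drain and later lockers see the flag. Those later
    * callers of nf_conntrack_lock() fall back to blocking on
    * nf_conntrack_locks_all_lock, which the exclusive holder keeps until
    * nf_conntrack_all_unlock() clears the flag with release semantics.
    */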
  149. unsigned int nf_conntrack_htable_size __read_mostly;
  150. EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
  151. unsigned int nf_conntrack_max __read_mostly;
  152. EXPORT_SYMBOL_GPL(nf_conntrack_max);
  153. seqcount_spinlock_t nf_conntrack_generation __read_mostly;
  154. static unsigned int nf_conntrack_hash_rnd __read_mostly;
  155. static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
  156. const struct net *net)
  157. {
  158. unsigned int n;
  159. u32 seed;
  160. get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
  161. /* The direction must be ignored, so we hash everything up to the
  162. * destination ports (which is a multiple of 4) and treat the last
  163. * three bytes manually.
  164. */
  165. seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
  166. n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
  167. return jhash2((u32 *)tuple, n, seed ^
  168. (((__force __u16)tuple->dst.u.all << 16) |
  169. tuple->dst.protonum));
  170. }
  171. static u32 scale_hash(u32 hash)
  172. {
  173. return reciprocal_scale(hash, nf_conntrack_htable_size);
  174. }
  175. static u32 __hash_conntrack(const struct net *net,
  176. const struct nf_conntrack_tuple *tuple,
  177. unsigned int size)
  178. {
  179. return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
  180. }
  181. static u32 hash_conntrack(const struct net *net,
  182. const struct nf_conntrack_tuple *tuple)
  183. {
  184. return scale_hash(hash_conntrack_raw(tuple, net));
  185. }
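   /* The raw hash covers the entire source part of the tuple plus the
    * destination address (a multiple of 4 bytes, as jhash2() requires),
    * while the destination port and protocol number are folded into the
    * seed along with the boot-time random value and the per-netns hash mix;
    * tuple->dst.dir never influences the result. scale_hash() and
    * __hash_conntrack() then map the 32-bit hash onto a bucket index with
    * reciprocal_scale(), using the current or a caller-supplied table size.
    */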
  186. static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
  187. unsigned int dataoff,
  188. struct nf_conntrack_tuple *tuple)
  189. { struct {
  190. __be16 sport;
  191. __be16 dport;
  192. } _inet_hdr, *inet_hdr;
  193. /* Actually only need first 4 bytes to get ports. */
  194. inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
  195. if (!inet_hdr)
  196. return false;
  197. tuple->src.u.udp.port = inet_hdr->sport;
  198. tuple->dst.u.udp.port = inet_hdr->dport;
  199. return true;
  200. }
  201. static bool
  202. nf_ct_get_tuple(const struct sk_buff *skb,
  203. unsigned int nhoff,
  204. unsigned int dataoff,
  205. u_int16_t l3num,
  206. u_int8_t protonum,
  207. struct net *net,
  208. struct nf_conntrack_tuple *tuple)
  209. {
  210. unsigned int size;
  211. const __be32 *ap;
  212. __be32 _addrs[8];
  213. memset(tuple, 0, sizeof(*tuple));
  214. tuple->src.l3num = l3num;
  215. switch (l3num) {
  216. case NFPROTO_IPV4:
  217. nhoff += offsetof(struct iphdr, saddr);
  218. size = 2 * sizeof(__be32);
  219. break;
  220. case NFPROTO_IPV6:
  221. nhoff += offsetof(struct ipv6hdr, saddr);
  222. size = sizeof(_addrs);
  223. break;
  224. default:
  225. return true;
  226. }
  227. ap = skb_header_pointer(skb, nhoff, size, _addrs);
  228. if (!ap)
  229. return false;
  230. switch (l3num) {
  231. case NFPROTO_IPV4:
  232. tuple->src.u3.ip = ap[0];
  233. tuple->dst.u3.ip = ap[1];
  234. break;
  235. case NFPROTO_IPV6:
  236. memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
  237. memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
  238. break;
  239. }
  240. tuple->dst.protonum = protonum;
  241. tuple->dst.dir = IP_CT_DIR_ORIGINAL;
  242. switch (protonum) {
  243. #if IS_ENABLED(CONFIG_IPV6)
  244. case IPPROTO_ICMPV6:
  245. return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
  246. #endif
  247. case IPPROTO_ICMP:
  248. return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
  249. #ifdef CONFIG_NF_CT_PROTO_GRE
  250. case IPPROTO_GRE:
  251. return gre_pkt_to_tuple(skb, dataoff, net, tuple);
  252. #endif
  253. case IPPROTO_TCP:
  254. case IPPROTO_UDP: /* fallthrough */
  255. return nf_ct_get_tuple_ports(skb, dataoff, tuple);
  256. #ifdef CONFIG_NF_CT_PROTO_UDPLITE
  257. case IPPROTO_UDPLITE:
  258. return nf_ct_get_tuple_ports(skb, dataoff, tuple);
  259. #endif
  260. #ifdef CONFIG_NF_CT_PROTO_SCTP
  261. case IPPROTO_SCTP:
  262. return nf_ct_get_tuple_ports(skb, dataoff, tuple);
  263. #endif
  264. #ifdef CONFIG_NF_CT_PROTO_DCCP
  265. case IPPROTO_DCCP:
  266. return nf_ct_get_tuple_ports(skb, dataoff, tuple);
  267. #endif
  268. default:
  269. break;
  270. }
  271. return true;
  272. }
  273. static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
  274. u_int8_t *protonum)
  275. {
  276. int dataoff = -1;
  277. const struct iphdr *iph;
  278. struct iphdr _iph;
  279. iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
  280. if (!iph)
  281. return -1;
  282. /* Conntrack defragments packets, we might still see fragments
  283. * inside ICMP packets though.
  284. */
  285. if (iph->frag_off & htons(IP_OFFSET))
  286. return -1;
  287. dataoff = nhoff + (iph->ihl << 2);
  288. *protonum = iph->protocol;
  289. /* Check bogus IP headers */
  290. if (dataoff > skb->len) {
  291. pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
  292. nhoff, iph->ihl << 2, skb->len);
  293. return -1;
  294. }
  295. return dataoff;
  296. }
  297. #if IS_ENABLED(CONFIG_IPV6)
  298. static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
  299. u8 *protonum)
  300. {
  301. int protoff = -1;
  302. unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
  303. __be16 frag_off;
  304. u8 nexthdr;
  305. if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
  306. &nexthdr, sizeof(nexthdr)) != 0) {
  307. pr_debug("can't get nexthdr\n");
  308. return -1;
  309. }
  310. protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
   311. /*
   312. * (protoff == skb->len) means the packet has no data, just the
   313. * IPv6 header and possibly extension headers, but it is tracked anyway
   314. */
  315. if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
  316. pr_debug("can't find proto in pkt\n");
  317. return -1;
  318. }
  319. *protonum = nexthdr;
  320. return protoff;
  321. }
  322. #endif
  323. static int get_l4proto(const struct sk_buff *skb,
  324. unsigned int nhoff, u8 pf, u8 *l4num)
  325. {
  326. switch (pf) {
  327. case NFPROTO_IPV4:
  328. return ipv4_get_l4proto(skb, nhoff, l4num);
  329. #if IS_ENABLED(CONFIG_IPV6)
  330. case NFPROTO_IPV6:
  331. return ipv6_get_l4proto(skb, nhoff, l4num);
  332. #endif
  333. default:
  334. *l4num = 0;
  335. break;
  336. }
  337. return -1;
  338. }
  339. bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
  340. u_int16_t l3num,
  341. struct net *net, struct nf_conntrack_tuple *tuple)
  342. {
  343. u8 protonum;
  344. int protoff;
  345. protoff = get_l4proto(skb, nhoff, l3num, &protonum);
  346. if (protoff <= 0)
  347. return false;
  348. return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
  349. }
  350. EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
  351. bool
  352. nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
  353. const struct nf_conntrack_tuple *orig)
  354. {
  355. memset(inverse, 0, sizeof(*inverse));
  356. inverse->src.l3num = orig->src.l3num;
  357. switch (orig->src.l3num) {
  358. case NFPROTO_IPV4:
  359. inverse->src.u3.ip = orig->dst.u3.ip;
  360. inverse->dst.u3.ip = orig->src.u3.ip;
  361. break;
  362. case NFPROTO_IPV6:
  363. inverse->src.u3.in6 = orig->dst.u3.in6;
  364. inverse->dst.u3.in6 = orig->src.u3.in6;
  365. break;
  366. default:
  367. break;
  368. }
  369. inverse->dst.dir = !orig->dst.dir;
  370. inverse->dst.protonum = orig->dst.protonum;
  371. switch (orig->dst.protonum) {
  372. case IPPROTO_ICMP:
  373. return nf_conntrack_invert_icmp_tuple(inverse, orig);
  374. #if IS_ENABLED(CONFIG_IPV6)
  375. case IPPROTO_ICMPV6:
  376. return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
  377. #endif
  378. }
  379. inverse->src.u.all = orig->dst.u.all;
  380. inverse->dst.u.all = orig->src.u.all;
  381. return true;
  382. }
  383. EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
   384. /* Generate an almost-unique pseudo-id for a given conntrack.
   385. *
   386. * Intentionally doesn't re-use any of the seeds used for hash
   387. * table location; we assume the id gets exposed to userspace.
  388. *
  389. * Following nf_conn items do not change throughout lifetime
  390. * of the nf_conn:
  391. *
  392. * 1. nf_conn address
  393. * 2. nf_conn->master address (normally NULL)
  394. * 3. the associated net namespace
  395. * 4. the original direction tuple
  396. */
  397. u32 nf_ct_get_id(const struct nf_conn *ct)
  398. {
  399. static __read_mostly siphash_key_t ct_id_seed;
  400. unsigned long a, b, c, d;
  401. net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
  402. a = (unsigned long)ct;
  403. b = (unsigned long)ct->master;
  404. c = (unsigned long)nf_ct_net(ct);
  405. d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
  406. sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
  407. &ct_id_seed);
  408. #ifdef CONFIG_64BIT
  409. return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
  410. #else
  411. return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
  412. #endif
  413. }
  414. EXPORT_SYMBOL_GPL(nf_ct_get_id);
  415. static void
  416. clean_from_lists(struct nf_conn *ct)
  417. {
  418. pr_debug("clean_from_lists(%p)\n", ct);
  419. hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
  420. hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
  421. /* Destroy all pending expectations */
  422. nf_ct_remove_expectations(ct);
  423. }
  424. /* must be called with local_bh_disable */
  425. static void nf_ct_add_to_dying_list(struct nf_conn *ct)
  426. {
  427. struct ct_pcpu *pcpu;
  428. /* add this conntrack to the (per cpu) dying list */
  429. ct->cpu = smp_processor_id();
  430. pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
  431. spin_lock(&pcpu->lock);
  432. hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
  433. &pcpu->dying);
  434. spin_unlock(&pcpu->lock);
  435. }
  436. /* must be called with local_bh_disable */
  437. static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
  438. {
  439. struct ct_pcpu *pcpu;
  440. /* add this conntrack to the (per cpu) unconfirmed list */
  441. ct->cpu = smp_processor_id();
  442. pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
  443. spin_lock(&pcpu->lock);
  444. hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
  445. &pcpu->unconfirmed);
  446. spin_unlock(&pcpu->lock);
  447. }
  448. /* must be called with local_bh_disable */
  449. static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
  450. {
  451. struct ct_pcpu *pcpu;
   452. /* We overload the first tuple to link into the unconfirmed or dying list. */
  453. pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
  454. spin_lock(&pcpu->lock);
  455. BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
  456. hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
  457. spin_unlock(&pcpu->lock);
  458. }
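   /* Until a conntrack is confirmed, its ORIGINAL tuplehash node is not in
    * the main hash table, so the three helpers above reuse that node to keep
    * the entry on a per-cpu "unconfirmed" or "dying" list; ct->cpu remembers
    * which CPU's list was used so the entry can be unlinked later from any
    * context.
    */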
  459. #define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
  460. /* Released via destroy_conntrack() */
  461. struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
  462. const struct nf_conntrack_zone *zone,
  463. gfp_t flags)
  464. {
  465. struct nf_conn *tmpl, *p;
  466. if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
  467. tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
  468. if (!tmpl)
  469. return NULL;
  470. p = tmpl;
  471. tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
  472. if (tmpl != p) {
  473. tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
  474. tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
  475. }
  476. } else {
  477. tmpl = kzalloc(sizeof(*tmpl), flags);
  478. if (!tmpl)
  479. return NULL;
  480. }
  481. tmpl->status = IPS_TEMPLATE;
  482. write_pnet(&tmpl->ct_net, net);
  483. nf_ct_zone_add(tmpl, zone);
  484. atomic_set(&tmpl->ct_general.use, 0);
  485. return tmpl;
  486. }
  487. EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
  488. void nf_ct_tmpl_free(struct nf_conn *tmpl)
  489. {
  490. nf_ct_ext_destroy(tmpl);
  491. if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
  492. kfree((char *)tmpl - tmpl->proto.tmpl_padto);
  493. else
  494. kfree(tmpl);
  495. }
  496. EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
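   /* Template conntracks are kmalloc'ed instead of coming from the conntrack
    * slab. skb->_nfct packs the ctinfo into the low NFCT_INFOMASK bits of
    * the conntrack pointer, so the pointer must have those bits clear: when
    * ARCH_KMALLOC_MINALIGN cannot guarantee that, nf_ct_tmpl_alloc()
    * over-allocates by NFCT_INFOMASK bytes, rounds the pointer up with
    * NFCT_ALIGN() and stores the padding in proto.tmpl_padto so that
    * nf_ct_tmpl_free() can recover the original allocation address.
    */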
  497. static void destroy_gre_conntrack(struct nf_conn *ct)
  498. {
  499. #ifdef CONFIG_NF_CT_PROTO_GRE
  500. struct nf_conn *master = ct->master;
  501. if (master)
  502. nf_ct_gre_keymap_destroy(master);
  503. #endif
  504. }
  505. static void
  506. destroy_conntrack(struct nf_conntrack *nfct)
  507. {
  508. struct nf_conn *ct = (struct nf_conn *)nfct;
  509. pr_debug("destroy_conntrack(%p)\n", ct);
  510. WARN_ON(atomic_read(&nfct->use) != 0);
  511. if (unlikely(nf_ct_is_template(ct))) {
  512. nf_ct_tmpl_free(ct);
  513. return;
  514. }
  515. if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
  516. destroy_gre_conntrack(ct);
  517. local_bh_disable();
  518. /* Expectations will have been removed in clean_from_lists,
  519. * except TFTP can create an expectation on the first packet,
  520. * before connection is in the list, so we need to clean here,
  521. * too.
  522. */
  523. nf_ct_remove_expectations(ct);
  524. nf_ct_del_from_dying_or_unconfirmed_list(ct);
  525. local_bh_enable();
  526. if (ct->master)
  527. nf_ct_put(ct->master);
  528. pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
  529. nf_conntrack_free(ct);
  530. }
  531. static void nf_ct_delete_from_lists(struct nf_conn *ct)
  532. {
  533. struct net *net = nf_ct_net(ct);
  534. unsigned int hash, reply_hash;
  535. unsigned int sequence;
  536. nf_ct_helper_destroy(ct);
  537. local_bh_disable();
  538. do {
  539. sequence = read_seqcount_begin(&nf_conntrack_generation);
  540. hash = hash_conntrack(net,
  541. &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
  542. reply_hash = hash_conntrack(net,
  543. &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
  544. } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
  545. clean_from_lists(ct);
  546. nf_conntrack_double_unlock(hash, reply_hash);
  547. nf_ct_add_to_dying_list(ct);
  548. local_bh_enable();
  549. }
  550. bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
  551. {
  552. struct nf_conn_tstamp *tstamp;
  553. if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
  554. return false;
  555. tstamp = nf_conn_tstamp_find(ct);
  556. if (tstamp) {
  557. s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
  558. tstamp->stop = ktime_get_real_ns();
  559. if (timeout < 0)
  560. tstamp->stop -= jiffies_to_nsecs(-timeout);
  561. }
  562. if (nf_conntrack_event_report(IPCT_DESTROY, ct,
  563. portid, report) < 0) {
  564. /* destroy event was not delivered. nf_ct_put will
  565. * be done by event cache worker on redelivery.
  566. */
  567. nf_ct_delete_from_lists(ct);
  568. nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
  569. return false;
  570. }
  571. nf_conntrack_ecache_work(nf_ct_net(ct));
  572. nf_ct_delete_from_lists(ct);
  573. nf_ct_put(ct);
  574. return true;
  575. }
  576. EXPORT_SYMBOL_GPL(nf_ct_delete);
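   /* nf_ct_delete() marks the entry dying exactly once via IPS_DYING_BIT,
    * records the stop timestamp (back-dated when the timeout had already
    * expired), and emits the IPCT_DESTROY event. If the event cannot be
    * delivered, the entry is still unlinked and moved to the dying list,
    * but the final nf_ct_put() is left to the event cache worker once
    * redelivery succeeds.
    */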
  577. static inline bool
  578. nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
  579. const struct nf_conntrack_tuple *tuple,
  580. const struct nf_conntrack_zone *zone,
  581. const struct net *net)
  582. {
  583. struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
  584. /* A conntrack can be recreated with the equal tuple,
  585. * so we need to check that the conntrack is confirmed
  586. */
  587. return nf_ct_tuple_equal(tuple, &h->tuple) &&
  588. nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
  589. nf_ct_is_confirmed(ct) &&
  590. net_eq(net, nf_ct_net(ct));
  591. }
  592. static inline bool
  593. nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
  594. {
  595. return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
  596. &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
  597. nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
  598. &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
  599. nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
  600. nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
  601. net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
  602. }
  603. /* caller must hold rcu readlock and none of the nf_conntrack_locks */
  604. static void nf_ct_gc_expired(struct nf_conn *ct)
  605. {
  606. if (!atomic_inc_not_zero(&ct->ct_general.use))
  607. return;
  608. if (nf_ct_should_gc(ct))
  609. nf_ct_kill(ct);
  610. nf_ct_put(ct);
  611. }
  612. /*
  613. * Warning :
  614. * - Caller must take a reference on returned object
  615. * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  616. */
  617. static struct nf_conntrack_tuple_hash *
  618. ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
  619. const struct nf_conntrack_tuple *tuple, u32 hash)
  620. {
  621. struct nf_conntrack_tuple_hash *h;
  622. struct hlist_nulls_head *ct_hash;
  623. struct hlist_nulls_node *n;
  624. unsigned int bucket, hsize;
  625. begin:
  626. nf_conntrack_get_ht(&ct_hash, &hsize);
  627. bucket = reciprocal_scale(hash, hsize);
  628. hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
  629. struct nf_conn *ct;
  630. ct = nf_ct_tuplehash_to_ctrack(h);
  631. if (nf_ct_is_expired(ct)) {
  632. nf_ct_gc_expired(ct);
  633. continue;
  634. }
  635. if (nf_ct_key_equal(h, tuple, zone, net))
  636. return h;
  637. }
  638. /*
  639. * if the nulls value we got at the end of this lookup is
  640. * not the expected one, we must restart lookup.
  641. * We probably met an item that was moved to another chain.
  642. */
  643. if (get_nulls_value(n) != bucket) {
  644. NF_CT_STAT_INC_ATOMIC(net, search_restart);
  645. goto begin;
  646. }
  647. return NULL;
  648. }
  649. /* Find a connection corresponding to a tuple. */
  650. static struct nf_conntrack_tuple_hash *
  651. __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
  652. const struct nf_conntrack_tuple *tuple, u32 hash)
  653. {
  654. struct nf_conntrack_tuple_hash *h;
  655. struct nf_conn *ct;
  656. rcu_read_lock();
  657. h = ____nf_conntrack_find(net, zone, tuple, hash);
  658. if (h) {
  659. /* We have a candidate that matches the tuple we're interested
  660. * in, try to obtain a reference and re-check tuple
  661. */
  662. ct = nf_ct_tuplehash_to_ctrack(h);
  663. if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
  664. if (likely(nf_ct_key_equal(h, tuple, zone, net)))
  665. goto found;
  666. /* TYPESAFE_BY_RCU recycled the candidate */
  667. nf_ct_put(ct);
  668. }
  669. h = NULL;
  670. }
  671. found:
  672. rcu_read_unlock();
  673. return h;
  674. }
  675. struct nf_conntrack_tuple_hash *
  676. nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
  677. const struct nf_conntrack_tuple *tuple)
  678. {
  679. return __nf_conntrack_find_get(net, zone, tuple,
  680. hash_conntrack_raw(tuple, net));
  681. }
  682. EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
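   /* Lookups are lockless: ____nf_conntrack_find() walks the RCU-protected
    * hlist_nulls chain and restarts (bumping the search_restart counter)
    * whenever the nulls value at the end of the chain does not match the
    * bucket, which means an entry was moved to another chain during a
    * resize or recycled under SLAB_TYPESAFE_BY_RCU.
    * __nf_conntrack_find_get() must then take a reference with
    * atomic_inc_not_zero() and re-check the key, because the candidate may
    * have been reused for a different flow in the meantime.
    */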
  683. static void __nf_conntrack_hash_insert(struct nf_conn *ct,
  684. unsigned int hash,
  685. unsigned int reply_hash)
  686. {
  687. hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
  688. &nf_conntrack_hash[hash]);
  689. hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
  690. &nf_conntrack_hash[reply_hash]);
  691. }
  692. int
  693. nf_conntrack_hash_check_insert(struct nf_conn *ct)
  694. {
  695. const struct nf_conntrack_zone *zone;
  696. struct net *net = nf_ct_net(ct);
  697. unsigned int hash, reply_hash;
  698. struct nf_conntrack_tuple_hash *h;
  699. struct hlist_nulls_node *n;
  700. unsigned int sequence;
  701. zone = nf_ct_zone(ct);
  702. local_bh_disable();
  703. do {
  704. sequence = read_seqcount_begin(&nf_conntrack_generation);
  705. hash = hash_conntrack(net,
  706. &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
  707. reply_hash = hash_conntrack(net,
  708. &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
  709. } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
  710. /* See if there's one in the list already, including reverse */
  711. hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
  712. if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
  713. zone, net))
  714. goto out;
  715. hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
  716. if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
  717. zone, net))
  718. goto out;
  719. smp_wmb();
  720. /* The caller holds a reference to this object */
  721. atomic_set(&ct->ct_general.use, 2);
  722. __nf_conntrack_hash_insert(ct, hash, reply_hash);
  723. nf_conntrack_double_unlock(hash, reply_hash);
  724. NF_CT_STAT_INC(net, insert);
  725. local_bh_enable();
  726. return 0;
  727. out:
  728. nf_conntrack_double_unlock(hash, reply_hash);
  729. local_bh_enable();
  730. return -EEXIST;
  731. }
  732. EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
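   /* nf_conntrack_hash_check_insert() is the direct insertion path used
    * when an entry is created outside the normal packet-path confirm step:
    * under the two bucket locks it scans both the original and reply
    * chains for a conflicting entry (-EEXIST), then sets the refcount to 2
    * (one reference for the hash table, one kept by the caller) before
    * linking both directions.
    */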
  733. void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
  734. unsigned int bytes)
  735. {
  736. struct nf_conn_acct *acct;
  737. acct = nf_conn_acct_find(ct);
  738. if (acct) {
  739. struct nf_conn_counter *counter = acct->counter;
  740. atomic64_add(packets, &counter[dir].packets);
  741. atomic64_add(bytes, &counter[dir].bytes);
  742. }
  743. }
  744. EXPORT_SYMBOL_GPL(nf_ct_acct_add);
  745. static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
  746. const struct nf_conn *loser_ct)
  747. {
  748. struct nf_conn_acct *acct;
  749. acct = nf_conn_acct_find(loser_ct);
  750. if (acct) {
  751. struct nf_conn_counter *counter = acct->counter;
  752. unsigned int bytes;
  753. /* u32 should be fine since we must have seen one packet. */
  754. bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
  755. nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), bytes);
  756. }
  757. }
  758. static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
  759. {
  760. struct nf_conn_tstamp *tstamp;
  761. atomic_inc(&ct->ct_general.use);
  762. ct->status |= IPS_CONFIRMED;
  763. /* set conntrack timestamp, if enabled. */
  764. tstamp = nf_conn_tstamp_find(ct);
  765. if (tstamp)
  766. tstamp->start = ktime_get_real_ns();
  767. }
  768. /* caller must hold locks to prevent concurrent changes */
  769. static int __nf_ct_resolve_clash(struct sk_buff *skb,
  770. struct nf_conntrack_tuple_hash *h)
  771. {
  772. /* This is the conntrack entry already in hashes that won race. */
  773. struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
  774. enum ip_conntrack_info ctinfo;
  775. struct nf_conn *loser_ct;
  776. loser_ct = nf_ct_get(skb, &ctinfo);
  777. if (nf_ct_is_dying(ct))
  778. return NF_DROP;
  779. if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
  780. nf_ct_match(ct, loser_ct)) {
  781. struct net *net = nf_ct_net(ct);
  782. nf_conntrack_get(&ct->ct_general);
  783. nf_ct_acct_merge(ct, ctinfo, loser_ct);
  784. nf_ct_add_to_dying_list(loser_ct);
  785. nf_conntrack_put(&loser_ct->ct_general);
  786. nf_ct_set(skb, ct, ctinfo);
  787. NF_CT_STAT_INC(net, clash_resolve);
  788. return NF_ACCEPT;
  789. }
  790. return NF_DROP;
  791. }
  792. /**
   793. * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
   794. *
   795. * @skb: skb that causes the collision
   796. * @repl_idx: hash slot for reply direction
   797. *
   798. * Called when origin or reply direction had a clash.
   799. * The skb can be handled without packet drop provided the reply direction
   800. * is unique or the existing entry has an identical tuple in both
   801. * directions.
   802. *
   803. * Caller must hold conntrack table locks to prevent concurrent updates.
   804. *
   805. * Returns NF_DROP if the clash could not be handled.
   806. */
  807. static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
  808. {
  809. struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
  810. const struct nf_conntrack_zone *zone;
  811. struct nf_conntrack_tuple_hash *h;
  812. struct hlist_nulls_node *n;
  813. struct net *net;
  814. zone = nf_ct_zone(loser_ct);
  815. net = nf_ct_net(loser_ct);
  816. /* Reply direction must never result in a clash, unless both origin
  817. * and reply tuples are identical.
  818. */
  819. hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
  820. if (nf_ct_key_equal(h,
  821. &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
  822. zone, net))
  823. return __nf_ct_resolve_clash(skb, h);
  824. }
  825. /* We want the clashing entry to go away real soon: 1 second timeout. */
  826. WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
  827. /* IPS_NAT_CLASH removes the entry automatically on the first
  828. * reply. Also prevents UDP tracker from moving the entry to
  829. * ASSURED state, i.e. the entry can always be evicted under
  830. * pressure.
  831. */
  832. loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
  833. __nf_conntrack_insert_prepare(loser_ct);
  834. /* fake add for ORIGINAL dir: we want lookups to only find the entry
  835. * already in the table. This also hides the clashing entry from
  836. * ctnetlink iteration, i.e. conntrack -L won't show them.
  837. */
  838. hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
  839. hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
  840. &nf_conntrack_hash[repl_idx]);
  841. NF_CT_STAT_INC(net, clash_resolve);
  842. return NF_ACCEPT;
  843. }
  844. /**
  845. * nf_ct_resolve_clash - attempt to handle clash without packet drop
  846. *
  847. * @skb: skb that causes the clash
  848. * @h: tuplehash of the clashing entry already in table
  849. * @reply_hash: hash slot for reply direction
  850. *
  851. * A conntrack entry can be inserted to the connection tracking table
  852. * if there is no existing entry with an identical tuple.
  853. *
   854. * If there is one, @skb (and the associated, unconfirmed conntrack) has
  855. * to be dropped. In case @skb is retransmitted, next conntrack lookup
  856. * will find the already-existing entry.
  857. *
  858. * The major problem with such packet drop is the extra delay added by
  859. * the packet loss -- it will take some time for a retransmit to occur
  860. * (or the sender to time out when waiting for a reply).
  861. *
  862. * This function attempts to handle the situation without packet drop.
  863. *
  864. * If @skb has no NAT transformation or if the colliding entries are
  865. * exactly the same, only the to-be-confirmed conntrack entry is discarded
  866. * and @skb is associated with the conntrack entry already in the table.
  867. *
  868. * Failing that, the new, unconfirmed conntrack is still added to the table
  869. * provided that the collision only occurs in the ORIGINAL direction.
  870. * The new entry will be added only in the non-clashing REPLY direction,
  871. * so packets in the ORIGINAL direction will continue to match the existing
  872. * entry. The new entry will also have a fixed timeout so it expires --
  873. * due to the collision, it will only see reply traffic.
  874. *
  875. * Returns NF_DROP if the clash could not be resolved.
  876. */
  877. static __cold noinline int
  878. nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
  879. u32 reply_hash)
  880. {
  881. /* This is the conntrack entry already in hashes that won race. */
  882. struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
  883. const struct nf_conntrack_l4proto *l4proto;
  884. enum ip_conntrack_info ctinfo;
  885. struct nf_conn *loser_ct;
  886. struct net *net;
  887. int ret;
  888. loser_ct = nf_ct_get(skb, &ctinfo);
  889. net = nf_ct_net(loser_ct);
  890. l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
  891. if (!l4proto->allow_clash)
  892. goto drop;
  893. ret = __nf_ct_resolve_clash(skb, h);
  894. if (ret == NF_ACCEPT)
  895. return ret;
  896. ret = nf_ct_resolve_clash_harder(skb, reply_hash);
  897. if (ret == NF_ACCEPT)
  898. return ret;
  899. drop:
  900. nf_ct_add_to_dying_list(loser_ct);
  901. NF_CT_STAT_INC(net, drop);
  902. NF_CT_STAT_INC(net, insert_failed);
  903. return NF_DROP;
  904. }
  905. /* Confirm a connection given skb; places it in hash table */
  906. int
  907. __nf_conntrack_confirm(struct sk_buff *skb)
  908. {
  909. const struct nf_conntrack_zone *zone;
  910. unsigned int hash, reply_hash;
  911. struct nf_conntrack_tuple_hash *h;
  912. struct nf_conn *ct;
  913. struct nf_conn_help *help;
  914. struct hlist_nulls_node *n;
  915. enum ip_conntrack_info ctinfo;
  916. struct net *net;
  917. unsigned int sequence;
  918. int ret = NF_DROP;
  919. ct = nf_ct_get(skb, &ctinfo);
  920. net = nf_ct_net(ct);
  921. /* ipt_REJECT uses nf_conntrack_attach to attach related
  922. ICMP/TCP RST packets in other direction. Actual packet
  923. which created connection will be IP_CT_NEW or for an
  924. expected connection, IP_CT_RELATED. */
  925. if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
  926. return NF_ACCEPT;
  927. zone = nf_ct_zone(ct);
  928. local_bh_disable();
  929. do {
  930. sequence = read_seqcount_begin(&nf_conntrack_generation);
  931. /* reuse the hash saved before */
  932. hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
  933. hash = scale_hash(hash);
  934. reply_hash = hash_conntrack(net,
  935. &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
  936. } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
  937. /* We're not in hash table, and we refuse to set up related
  938. * connections for unconfirmed conns. But packet copies and
  939. * REJECT will give spurious warnings here.
  940. */
  941. /* Another skb with the same unconfirmed conntrack may
  942. * win the race. This may happen for bridge(br_flood)
  943. * or broadcast/multicast packets do skb_clone with
  944. * unconfirmed conntrack.
  945. */
  946. if (unlikely(nf_ct_is_confirmed(ct))) {
  947. WARN_ON_ONCE(1);
  948. nf_conntrack_double_unlock(hash, reply_hash);
  949. local_bh_enable();
  950. return NF_DROP;
  951. }
  952. pr_debug("Confirming conntrack %p\n", ct);
  953. /* We have to check the DYING flag after unlink to prevent
  954. * a race against nf_ct_get_next_corpse() possibly called from
  955. * user context, else we insert an already 'dead' hash, blocking
  956. * further use of that particular connection -JM.
  957. */
  958. nf_ct_del_from_dying_or_unconfirmed_list(ct);
  959. if (unlikely(nf_ct_is_dying(ct))) {
  960. nf_ct_add_to_dying_list(ct);
  961. NF_CT_STAT_INC(net, insert_failed);
  962. goto dying;
  963. }
  964. /* See if there's one in the list already, including reverse:
  965. NAT could have grabbed it without realizing, since we're
  966. not in the hash. If there is, we lost race. */
  967. hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
  968. if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
  969. zone, net))
  970. goto out;
  971. hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
  972. if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
  973. zone, net))
  974. goto out;
  975. /* Timer relative to confirmation time, not original
  976. setting time, otherwise we'd get timer wrap in
  977. weird delay cases. */
  978. ct->timeout += nfct_time_stamp;
  979. __nf_conntrack_insert_prepare(ct);
  980. /* Since the lookup is lockless, hash insertion must be done after
  981. * starting the timer and setting the CONFIRMED bit. The RCU barriers
  982. * guarantee that no other CPU can find the conntrack before the above
  983. * stores are visible.
  984. */
  985. __nf_conntrack_hash_insert(ct, hash, reply_hash);
  986. nf_conntrack_double_unlock(hash, reply_hash);
  987. local_bh_enable();
  988. help = nfct_help(ct);
  989. if (help && help->helper)
  990. nf_conntrack_event_cache(IPCT_HELPER, ct);
  991. nf_conntrack_event_cache(master_ct(ct) ?
  992. IPCT_RELATED : IPCT_NEW, ct);
  993. return NF_ACCEPT;
  994. out:
  995. ret = nf_ct_resolve_clash(skb, h, reply_hash);
  996. dying:
  997. nf_conntrack_double_unlock(hash, reply_hash);
  998. local_bh_enable();
  999. return ret;
  1000. }
  1001. EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
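   /* Note on the "reuse the hash saved before" step in __nf_conntrack_confirm():
    * __nf_conntrack_alloc() stashes the raw tuple hash in the otherwise
    * unused pprev word of the REPLY tuplehash node, so the ORIGINAL-direction
    * bucket only needs to be rescaled here rather than recomputed from the
    * tuple. The entry is made visible to other CPUs only after the relative
    * timeout has been converted to an absolute one and
    * __nf_conntrack_insert_prepare() has set IPS_CONFIRMED and taken the
    * hash table's reference.
    */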
   1002. /* Returns true if a connection corresponds to the tuple (required
  1003. for NAT). */
  1004. int
  1005. nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
  1006. const struct nf_conn *ignored_conntrack)
  1007. {
  1008. struct net *net = nf_ct_net(ignored_conntrack);
  1009. const struct nf_conntrack_zone *zone;
  1010. struct nf_conntrack_tuple_hash *h;
  1011. struct hlist_nulls_head *ct_hash;
  1012. unsigned int hash, hsize;
  1013. struct hlist_nulls_node *n;
  1014. struct nf_conn *ct;
  1015. zone = nf_ct_zone(ignored_conntrack);
  1016. rcu_read_lock();
  1017. begin:
  1018. nf_conntrack_get_ht(&ct_hash, &hsize);
  1019. hash = __hash_conntrack(net, tuple, hsize);
  1020. hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
  1021. ct = nf_ct_tuplehash_to_ctrack(h);
  1022. if (ct == ignored_conntrack)
  1023. continue;
  1024. if (nf_ct_is_expired(ct)) {
  1025. nf_ct_gc_expired(ct);
  1026. continue;
  1027. }
  1028. if (nf_ct_key_equal(h, tuple, zone, net)) {
  1029. /* Tuple is taken already, so caller will need to find
  1030. * a new source port to use.
  1031. *
  1032. * Only exception:
  1033. * If the *original tuples* are identical, then both
  1034. * conntracks refer to the same flow.
  1035. * This is a rare situation, it can occur e.g. when
  1036. * more than one UDP packet is sent from same socket
  1037. * in different threads.
  1038. *
  1039. * Let nf_ct_resolve_clash() deal with this later.
  1040. */
  1041. if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
  1042. &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
  1043. nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
  1044. continue;
  1045. NF_CT_STAT_INC_ATOMIC(net, found);
  1046. rcu_read_unlock();
  1047. return 1;
  1048. }
  1049. }
  1050. if (get_nulls_value(n) != hash) {
  1051. NF_CT_STAT_INC_ATOMIC(net, search_restart);
  1052. goto begin;
  1053. }
  1054. rcu_read_unlock();
  1055. return 0;
  1056. }
  1057. EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
  1058. #define NF_CT_EVICTION_RANGE 8
  1059. /* There's a small race here where we may free a just-assured
  1060. connection. Too bad: we're in trouble anyway. */
  1061. static unsigned int early_drop_list(struct net *net,
  1062. struct hlist_nulls_head *head)
  1063. {
  1064. struct nf_conntrack_tuple_hash *h;
  1065. struct hlist_nulls_node *n;
  1066. unsigned int drops = 0;
  1067. struct nf_conn *tmp;
  1068. hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
  1069. tmp = nf_ct_tuplehash_to_ctrack(h);
  1070. if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
  1071. continue;
  1072. if (nf_ct_is_expired(tmp)) {
  1073. nf_ct_gc_expired(tmp);
  1074. continue;
  1075. }
  1076. if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
  1077. !net_eq(nf_ct_net(tmp), net) ||
  1078. nf_ct_is_dying(tmp))
  1079. continue;
  1080. if (!atomic_inc_not_zero(&tmp->ct_general.use))
  1081. continue;
  1082. /* kill only if still in same netns -- might have moved due to
  1083. * SLAB_TYPESAFE_BY_RCU rules.
  1084. *
  1085. * We steal the timer reference. If that fails timer has
  1086. * already fired or someone else deleted it. Just drop ref
  1087. * and move to next entry.
  1088. */
  1089. if (net_eq(nf_ct_net(tmp), net) &&
  1090. nf_ct_is_confirmed(tmp) &&
  1091. nf_ct_delete(tmp, 0, 0))
  1092. drops++;
  1093. nf_ct_put(tmp);
  1094. }
  1095. return drops;
  1096. }
  1097. static noinline int early_drop(struct net *net, unsigned int hash)
  1098. {
  1099. unsigned int i, bucket;
  1100. for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
  1101. struct hlist_nulls_head *ct_hash;
  1102. unsigned int hsize, drops;
  1103. rcu_read_lock();
  1104. nf_conntrack_get_ht(&ct_hash, &hsize);
  1105. if (!i)
  1106. bucket = reciprocal_scale(hash, hsize);
  1107. else
  1108. bucket = (bucket + 1) % hsize;
  1109. drops = early_drop_list(net, &ct_hash[bucket]);
  1110. rcu_read_unlock();
  1111. if (drops) {
  1112. NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
  1113. return true;
  1114. }
  1115. }
  1116. return false;
  1117. }
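   /* early_drop() runs from the allocation path when the table is full: it
    * scans up to NF_CT_EVICTION_RANGE buckets, starting from the bucket
    * derived from the supplied hash, and evicts entries that are neither
    * ASSURED nor already dying so that the new connection can still be
    * tracked.
    */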
  1118. static bool gc_worker_skip_ct(const struct nf_conn *ct)
  1119. {
  1120. return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
  1121. }
  1122. static bool gc_worker_can_early_drop(const struct nf_conn *ct)
  1123. {
  1124. const struct nf_conntrack_l4proto *l4proto;
  1125. if (!test_bit(IPS_ASSURED_BIT, &ct->status))
  1126. return true;
  1127. l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
  1128. if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
  1129. return true;
  1130. return false;
  1131. }
  1132. static void gc_worker(struct work_struct *work)
  1133. {
  1134. unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
  1135. unsigned int i, hashsz, nf_conntrack_max95 = 0;
  1136. unsigned long next_run = GC_SCAN_INTERVAL;
  1137. struct conntrack_gc_work *gc_work;
  1138. gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
  1139. i = gc_work->next_bucket;
  1140. if (gc_work->early_drop)
  1141. nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
  1142. do {
  1143. struct nf_conntrack_tuple_hash *h;
  1144. struct hlist_nulls_head *ct_hash;
  1145. struct hlist_nulls_node *n;
  1146. struct nf_conn *tmp;
  1147. rcu_read_lock();
  1148. nf_conntrack_get_ht(&ct_hash, &hashsz);
  1149. if (i >= hashsz) {
  1150. rcu_read_unlock();
  1151. break;
  1152. }
  1153. hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
  1154. struct net *net;
  1155. tmp = nf_ct_tuplehash_to_ctrack(h);
  1156. if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
  1157. nf_ct_offload_timeout(tmp);
  1158. continue;
  1159. }
  1160. if (nf_ct_is_expired(tmp)) {
  1161. nf_ct_gc_expired(tmp);
  1162. continue;
  1163. }
  1164. if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
  1165. continue;
  1166. net = nf_ct_net(tmp);
  1167. if (atomic_read(&net->ct.count) < nf_conntrack_max95)
  1168. continue;
  1169. /* need to take reference to avoid possible races */
  1170. if (!atomic_inc_not_zero(&tmp->ct_general.use))
  1171. continue;
  1172. if (gc_worker_skip_ct(tmp)) {
  1173. nf_ct_put(tmp);
  1174. continue;
  1175. }
  1176. if (gc_worker_can_early_drop(tmp))
  1177. nf_ct_kill(tmp);
  1178. nf_ct_put(tmp);
  1179. }
  1180. /* could check get_nulls_value() here and restart if ct
  1181. * was moved to another chain. But given gc is best-effort
  1182. * we will just continue with next hash slot.
  1183. */
  1184. rcu_read_unlock();
  1185. cond_resched();
  1186. i++;
  1187. if (time_after(jiffies, end_time) && i < hashsz) {
  1188. gc_work->next_bucket = i;
  1189. next_run = 0;
  1190. break;
  1191. }
  1192. } while (i < hashsz);
  1193. if (gc_work->exiting)
  1194. return;
  1195. /*
  1196. * Eviction will normally happen from the packet path, and not
  1197. * from this gc worker.
  1198. *
   1199. * This worker is only here to reap expired entries when the system went
  1200. * idle after a busy period.
  1201. */
  1202. if (next_run) {
  1203. gc_work->early_drop = false;
  1204. gc_work->next_bucket = 0;
  1205. }
  1206. queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
  1207. }
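   /* The gc worker walks the table in bounded slices: when the
    * GC_SCAN_MAX_DURATION budget runs out mid-table it records the next
    * bucket and reschedules itself immediately, otherwise it sleeps for
    * GC_SCAN_INTERVAL. When the allocation path has flagged table pressure
    * (gc_work->early_drop), it additionally kills entries that are not
    * ASSURED (or that the L4 tracker allows to be dropped early) once a
    * netns holds at least 95% of nf_conntrack_max entries.
    */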
  1208. static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
  1209. {
  1210. INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
  1211. gc_work->exiting = false;
  1212. }
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
{
        struct nf_conn *ct;

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                if (!early_drop(net, hash)) {
                        if (!conntrack_gc_work.early_drop)
                                conntrack_gc_work.early_drop = true;
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_TYPESAFE_BY_RCU.
         */
        ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
        if (ct == NULL)
                goto out;

        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
        WRITE_ONCE(ct->timeout, 0);
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset, 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset));

        nf_ct_zone_add(ct, zone);

        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
out:
        atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* A freed object has refcnt == 0, that's
         * the golden rule for SLAB_TYPESAFE_BY_RCU
         */
        WARN_ON(atomic_read(&ct->ct_general.use) != 0);

        nf_ct_ext_destroy(ct);
        kmem_cache_free(nf_conntrack_cachep, ct);
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct sk_buff *skb,
               unsigned int dataoff, u32 hash)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
        const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
        struct nf_conntrack_zone tmp;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;

        if (!nf_ct_add_synproxy(ct, tmpl)) {
                nf_conntrack_free(ct);
                return ERR_PTR(-ENOMEM);
        }

        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;

        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
                                      GFP_ATOMIC);

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);

        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
                             ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);

        local_bh_disable();
        if (net->ct.expect_count) {
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
                        pr_debug("expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
                        ct->master = exp->master;
                        if (exp->helper) {
                                help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
                                if (help)
                                        rcu_assign_pointer(help->helper, exp->helper);
                        }

#ifdef CONFIG_NF_CONNTRACK_MARK
                        ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
#endif
                        NF_CT_STAT_INC(net, expect_new);
                }
                spin_unlock(&nf_conntrack_expect_lock);
        }
        if (!exp)
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
        nf_ct_add_to_unconfirmed_list(ct);

        local_bh_enable();

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int8_t protonum,
                  const struct nf_hook_state *state)
{
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
        u32 hash;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, state->pf, protonum, state->net,
                             &tuple)) {
                pr_debug("Can't get tuple\n");
                return 0;
        }

        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        hash = hash_conntrack_raw(&tuple, state->net);
        h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(state->net, tmpl, &tuple,
                                   skb, dataoff, hash);
                if (!h)
                        return 0;
                if (IS_ERR(h))
                        return PTR_ERR(h);
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                ctinfo = IP_CT_ESTABLISHED_REPLY;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("normal packet for %p\n", ct);
                        ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("related packet for %p\n", ct);
                        ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("new packet for %p\n", ct);
                        ctinfo = IP_CT_NEW;
                }
        }
        nf_ct_set(skb, ct, ctinfo);
        return 0;
}

/*
 * icmp packets need special treatment to handle error messages that are
 * related to a connection.
 *
 * Callers need to check if skb has a conntrack assigned when this
 * helper returns; in such case skb belongs to an already known connection.
 */
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
                         struct sk_buff *skb,
                         unsigned int dataoff,
                         u8 protonum,
                         const struct nf_hook_state *state)
{
        int ret;

        if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
                ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
        else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
                ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
        else
                return NF_ACCEPT;

        if (ret <= 0)
                NF_CT_STAT_INC_ATOMIC(state->net, error);

        return ret;
}
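
/* Fallback for protocols without a dedicated tracker: refresh the
 * connection with the generic timeout and accept the packet.
 */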
static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
                          enum ip_conntrack_info ctinfo)
{
        const unsigned int *timeout = nf_ct_timeout_lookup(ct);

        if (!timeout)
                timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;

        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
}

/* Returns verdict for packet, or -1 for invalid. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
                                      struct sk_buff *skb,
                                      unsigned int dataoff,
                                      enum ip_conntrack_info ctinfo,
                                      const struct nf_hook_state *state)
{
        switch (nf_ct_protonum(ct)) {
        case IPPROTO_TCP:
                return nf_conntrack_tcp_packet(ct, skb, dataoff,
                                               ctinfo, state);
        case IPPROTO_UDP:
                return nf_conntrack_udp_packet(ct, skb, dataoff,
                                               ctinfo, state);
        case IPPROTO_ICMP:
                return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
#if IS_ENABLED(CONFIG_IPV6)
        case IPPROTO_ICMPV6:
                return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
        case IPPROTO_UDPLITE:
                return nf_conntrack_udplite_packet(ct, skb, dataoff,
                                                   ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
        case IPPROTO_SCTP:
                return nf_conntrack_sctp_packet(ct, skb, dataoff,
                                                ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
        case IPPROTO_DCCP:
                return nf_conntrack_dccp_packet(ct, skb, dataoff,
                                                ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
        case IPPROTO_GRE:
                return nf_conntrack_gre_packet(ct, skb, dataoff,
                                               ctinfo, state);
#endif
        }

        return generic_packet(ct, skb, ctinfo);
}
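
/* Main conntrack hook entry point: look up (or create) the conntrack for
 * this skb and run the per-protocol state machine.  Returns an NF_* verdict.
 */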
unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct, *tmpl;
        u_int8_t protonum;
        int dataoff, ret;

        tmpl = nf_ct_get(skb, &ctinfo);
        if (tmpl || ctinfo == IP_CT_UNTRACKED) {
                /* Previously seen (loopback or untracked)?  Ignore. */
                if ((tmpl && !nf_ct_is_template(tmpl)) ||
                    ctinfo == IP_CT_UNTRACKED)
                        return NF_ACCEPT;
                skb->_nfct = 0;
        }

        /* rcu_read_lock()ed by nf_hook_thresh */
        dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
        if (dataoff <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
                ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
                                               protonum, state);
                if (ret <= 0) {
                        ret = -ret;
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
                if (skb->_nfct)
                        goto out;
        }
repeat:
        ret = resolve_normal_ct(tmpl, skb, dataoff,
                                protonum, state);
        if (ret < 0) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(state->net, drop);
                ret = NF_DROP;
                goto out;
        }

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(&ct->ct_general);
                skb->_nfct = 0;
                /* Special case: TCP tracker reports an attempt to reopen a
                 * closed/aborted connection. We have to go back and create a
                 * fresh conntrack.
                 */
                if (ret == -NF_REPEAT)
                        goto repeat;

                NF_CT_STAT_INC_ATOMIC(state->net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(state->net, drop);

                ret = -ret;
                goto out;
        }

        if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
            !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
        if (tmpl)
                nf_ct_put(tmpl);

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        WARN_ON(nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          u32 extra_jiffies,
                          bool do_acct)
{
        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (nf_ct_is_confirmed(ct))
                extra_jiffies += nfct_time_stamp;

        if (READ_ONCE(ct->timeout) != extra_jiffies)
                WRITE_ONCE(ct->timeout, extra_jiffies);
acct:
        if (do_acct)
                nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool nf_ct_kill_acct(struct nf_conn *ct,
                     enum ip_conntrack_info ctinfo,
                     const struct sk_buff *skb)
{
        nf_ct_acct_update(ct, CTINFO2DIR(ctinfo), skb->len);
        return nf_ct_delete(ct, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
            nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t,
                               u_int32_t flags)
{
        if (flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) {
                if (!tb[CTA_PROTO_SRC_PORT])
                        return -EINVAL;

                t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        }

        if (flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) {
                if (!tb[CTA_PROTO_DST_PORT])
                        return -EINVAL;

                t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

unsigned int nf_ct_port_nlattr_tuple_size(void)
{
        static unsigned int size __read_mostly;

        if (!size)
                size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);

        return size;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nf_ct_set(nskb, ct, ctinfo);
        nf_conntrack_get(skb_nfct(nskb));
}
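
/* The skb carries an unconfirmed conntrack that may have lost a clash while
 * the packet sat in nf_queue: look the tuple up again, attach an already
 * confirmed entry if one exists, and redo NAT mangling according to the
 * status bits saved from the replaced entry.
 */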
static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
                                 struct nf_conn *ct,
                                 enum ip_conntrack_info ctinfo)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_tuple tuple;
        struct nf_nat_hook *nat_hook;
        unsigned int status;
        int dataoff;
        u16 l3num;
        u8 l4num;

        l3num = nf_ct_l3num(ct);

        dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
        if (dataoff <= 0)
                return -1;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
                             l4num, net, &tuple))
                return -1;

        if (ct->status & IPS_SRC_NAT) {
                memcpy(tuple.src.u3.all,
                       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
                       sizeof(tuple.src.u3.all));
                tuple.src.u.all =
                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
        }

        if (ct->status & IPS_DST_NAT) {
                memcpy(tuple.dst.u3.all,
                       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
                       sizeof(tuple.dst.u3.all));
                tuple.dst.u.all =
                        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
        }

        h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
        if (!h)
                return 0;

        /* Store status bits of the conntrack that is clashing to re-do NAT
         * mangling according to what it has been done already to this packet.
         */
        status = ct->status;

        nf_ct_put(ct);
        ct = nf_ct_tuplehash_to_ctrack(h);
        nf_ct_set(skb, ct, ctinfo);

        nat_hook = rcu_dereference(nf_nat_hook);
        if (!nat_hook)
                return 0;

        if (status & IPS_SRC_NAT &&
            nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
                                IP_CT_DIR_ORIGINAL) == NF_DROP)
                return -1;

        if (status & IPS_DST_NAT &&
            nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
                                IP_CT_DIR_ORIGINAL) == NF_DROP)
                return -1;

        return 0;
}

/* This packet is coming from userspace via nf_queue, complete the packet
 * processing after the helper invocation in nf_confirm().
 */
static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
                               enum ip_conntrack_info ctinfo)
{
        const struct nf_conntrack_helper *helper;
        const struct nf_conn_help *help;
        int protoff;

        help = nfct_help(ct);
        if (!help)
                return 0;

        helper = rcu_dereference(help->helper);
        if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
                return 0;

        switch (nf_ct_l3num(ct)) {
        case NFPROTO_IPV4:
                protoff = skb_network_offset(skb) + ip_hdrlen(skb);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case NFPROTO_IPV6: {
                __be16 frag_off;
                u8 pnum;

                pnum = ipv6_hdr(skb)->nexthdr;
                protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
                                           &frag_off);
                if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
                        return 0;
                break;
        }
#endif
        default:
                return 0;
        }

        if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
            !nf_is_loopback_packet(skb)) {
                if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
                        NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
                        return -1;
                }
        }

        /* We've seen it coming out the other side: confirm it */
        return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
}
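
/* ->update hook (registered in nf_conntrack_hook below): handle a packet
 * reinjected from nf_queue by re-resolving an unconfirmed conntrack and
 * completing the userspace-helper/confirm processing.
 */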
static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
        int err;

        ct = nf_ct_get(skb, &ctinfo);
        if (!ct)
                return 0;

        if (!nf_ct_is_confirmed(ct)) {
                err = __nf_conntrack_update(net, skb, ct, ctinfo);
                if (err < 0)
                        return err;

                ct = nf_ct_get(skb, &ctinfo);
        }

        return nf_confirm_cthelper(skb, ct, ctinfo);
}
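
/* ->get_tuple_skb hook: recover a conntrack tuple for the skb, either from
 * an attached conntrack or via an IPv4 lookup of the packet headers.
 */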
static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
                                       const struct sk_buff *skb)
{
        const struct nf_conntrack_tuple *src_tuple;
        const struct nf_conntrack_tuple_hash *hash;
        struct nf_conntrack_tuple srctuple;
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;

        ct = nf_ct_get(skb, &ctinfo);
        if (ct) {
                src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
                memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
                return true;
        }

        if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                               NFPROTO_IPV4, dev_net(skb->dev),
                               &srctuple))
                return false;

        hash = nf_conntrack_find_get(dev_net(skb->dev),
                                     &nf_ct_zone_dflt,
                                     &srctuple);
        if (!hash)
                return false;

        ct = nf_ct_tuplehash_to_ctrack(hash);
        src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
        memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
        nf_ct_put(ct);

        return true;
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
        spinlock_t *lockp;

        for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
                struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];

                if (hlist_nulls_empty(hslot))
                        continue;

                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                nf_conntrack_lock(lockp);
                hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
                        if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
                                continue;
                        /* All nf_conn objects are added to hash table twice, one
                         * for original direction tuple, once for the reply tuple.
                         *
                         * Exception: In the IPS_NAT_CLASH case, only the reply
                         * tuple is added (the original tuple already existed for
                         * a different object).
                         *
                         * We only need to call the iterator once for each
                         * conntrack, so we just use the 'reply' direction
                         * tuple while iterating.
                         */
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                goto found;
                }
                spin_unlock(lockp);
                local_bh_enable();
                cond_resched();
        }

        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock(lockp);
        local_bh_enable();
        return ct;
}
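
/* Walk the whole conntrack table under nf_conntrack_mutex and delete every
 * entry the iterator selects.
 */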
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
                                  void *data, u32 portid, int report)
{
        unsigned int bucket = 0;
        struct nf_conn *ct;

        might_sleep();

        mutex_lock(&nf_conntrack_mutex);
        while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
                /* Time to push up daisies... */
                nf_ct_delete(ct, portid, report);
                nf_ct_put(ct);
                cond_resched();
        }
        mutex_unlock(&nf_conntrack_mutex);
}

struct iter_data {
        int (*iter)(struct nf_conn *i, void *data);
        void *data;
        struct net *net;
};

static int iter_net_only(struct nf_conn *i, void *data)
{
        struct iter_data *d = data;

        if (!net_eq(d->net, nf_ct_net(i)))
                return 0;

        return d->iter(i, d->data);
}

static void
__nf_ct_unconfirmed_destroy(struct net *net)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct nf_conntrack_tuple_hash *h;
                struct hlist_nulls_node *n;
                struct ct_pcpu *pcpu;

                pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_bh(&pcpu->lock);
                hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
                        struct nf_conn *ct;

                        ct = nf_ct_tuplehash_to_ctrack(h);

                        /* we cannot call iter() on unconfirmed list, the
                         * owning cpu can reallocate ct->ext at any time.
                         */
                        set_bit(IPS_DYING_BIT, &ct->status);
                }
                spin_unlock_bh(&pcpu->lock);
                cond_resched();
        }
}

void nf_ct_unconfirmed_destroy(struct net *net)
{
        might_sleep();

        if (atomic_read(&net->ct.count) > 0) {
                __nf_ct_unconfirmed_destroy(net);
                nf_queue_nf_hook_drop(net);
                synchronize_net();
        }
}
EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);

void nf_ct_iterate_cleanup_net(struct net *net,
                               int (*iter)(struct nf_conn *i, void *data),
                               void *data, u32 portid, int report)
{
        struct iter_data d;

        might_sleep();

        if (atomic_read(&net->ct.count) == 0)
                return;

        d.iter = iter;
        d.data = data;
        d.net = net;

        nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);

/**
 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
 * @iter: callback to invoke for each conntrack
 * @data: data to pass to @iter
 *
 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
 * unconfirmed list as dying (so they will not be inserted into
 * main table).
 *
 * Can only be called in module exit path.
 */
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{
        struct net *net;

        down_read(&net_rwsem);
        for_each_net(net) {
                if (atomic_read(&net->ct.count) == 0)
                        continue;
                __nf_ct_unconfirmed_destroy(net);
                nf_queue_nf_hook_drop(net);
        }
        up_read(&net_rwsem);

        /* Need to wait for netns cleanup worker to finish, if it's
         * running -- it might have deleted a net namespace from
         * the global list, so our __nf_ct_unconfirmed_destroy() might
         * not have affected all namespaces.
         */
        net_ns_barrier();

        /* a conntrack could have been unlinked from unconfirmed list
         * before we grabbed pcpu lock in __nf_ct_unconfirmed_destroy().
         * This makes sure it's inserted into conntrack table.
         */
        synchronize_net();

        nf_ct_iterate_cleanup(iter, data, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);

static int kill_all(struct nf_conn *i, void *data)
{
        return net_eq(nf_ct_net(i), data);
}

void nf_conntrack_cleanup_start(void)
{
        conntrack_gc_work.exiting = true;
        RCU_INIT_POINTER(ip_ct_attach, NULL);
}

void nf_conntrack_cleanup_end(void)
{
        RCU_INIT_POINTER(nf_ct_hook, NULL);
        cancel_delayed_work_sync(&conntrack_gc_work.dwork);
        kvfree(nf_conntrack_hash);

        nf_conntrack_proto_fini();
        nf_conntrack_seqadj_fini();
        nf_conntrack_labels_fini();
        nf_conntrack_helper_fini();
        nf_conntrack_timeout_fini();
        nf_conntrack_ecache_fini();
        nf_conntrack_tstamp_fini();
        nf_conntrack_acct_fini();
        nf_conntrack_expect_fini();

        kmem_cache_destroy(nf_conntrack_cachep);
}

/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
void nf_conntrack_cleanup_net(struct net *net)
{
        LIST_HEAD(single);

        list_add(&net->exit_list, &single);
        nf_conntrack_cleanup_net_list(&single);
}

void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{
        int busy;
        struct net *net;

        /*
         * This makes sure all current packets have passed through
         * netfilter framework.  Roll on, two-stage module
         * delete...
         */
        synchronize_net();
i_see_dead_people:
        busy = 0;
        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_ct_iterate_cleanup(kill_all, net, 0, 0);
                if (atomic_read(&net->ct.count) != 0)
                        busy = 1;
        }
        if (busy) {
                schedule();
                goto i_see_dead_people;
        }

        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_conntrack_proto_pernet_fini(net);
                nf_conntrack_ecache_pernet_fini(net);
                nf_conntrack_expect_pernet_fini(net);
                free_percpu(net->ct.stat);
                free_percpu(net->ct.pcpu_lists);
        }
}
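
/* Allocate a hash table with at least *sizep buckets, rounded up so the
 * table fills whole pages; *sizep is updated to the number of buckets
 * actually used.
 */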
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
        struct hlist_nulls_head *hash;
        unsigned int nr_slots, i;

        if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
                return NULL;

        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));

        hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);

        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
                        INIT_HLIST_NULLS_HEAD(&hash[i], i);

        return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
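
/* Rehash all entries into a freshly allocated table of @hashsize buckets.
 * Runs with every bucket lock held and bumps nf_conntrack_generation so
 * concurrent lookups notice the change.
 */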
int nf_conntrack_hash_resize(unsigned int hashsize)
{
        int i, bucket;
        unsigned int old_size;
        struct hlist_nulls_head *hash, *old_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        if (!hashsize)
                return -EINVAL;

        hash = nf_ct_alloc_hashtable(&hashsize, 1);
        if (!hash)
                return -ENOMEM;

        mutex_lock(&nf_conntrack_mutex);
        old_size = nf_conntrack_htable_size;
        if (old_size == hashsize) {
                mutex_unlock(&nf_conntrack_mutex);
                kvfree(hash);
                return 0;
        }

        local_bh_disable();
        nf_conntrack_all_lock();
        write_seqcount_begin(&nf_conntrack_generation);

        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
         * though since that required taking the locks.
         */

        for (i = 0; i < nf_conntrack_htable_size; i++) {
                while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
                        h = hlist_nulls_entry(nf_conntrack_hash[i].first,
                                              struct nf_conntrack_tuple_hash, hnnode);
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        hlist_nulls_del_rcu(&h->hnnode);
                        bucket = __hash_conntrack(nf_ct_net(ct),
                                                  &h->tuple, hashsize);
                        hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
                }
        }
        old_size = nf_conntrack_htable_size;
        old_hash = nf_conntrack_hash;

        nf_conntrack_hash = hash;
        nf_conntrack_htable_size = hashsize;

        write_seqcount_end(&nf_conntrack_generation);
        nf_conntrack_all_unlock();
        local_bh_enable();

        mutex_unlock(&nf_conntrack_mutex);
        synchronize_net();
        kvfree(old_hash);
        return 0;
}

int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
{
        unsigned int hashsize;
        int rc;

        if (current->nsproxy->net_ns != &init_net)
                return -EOPNOTSUPP;

        /* On boot, we can set this without any fancy locking. */
        if (!nf_conntrack_hash)
                return param_set_uint(val, kp);

        rc = kstrtouint(val, 0, &hashsize);
        if (rc)
                return rc;

        return nf_conntrack_hash_resize(hashsize);
}

static __always_inline unsigned int total_extension_size(void)
{
        /* remember to add new extensions below */
        BUILD_BUG_ON(NF_CT_EXT_NUM > 9);

        return sizeof(struct nf_ct_ext) +
               sizeof(struct nf_conn_help)
#if IS_ENABLED(CONFIG_NF_NAT)
                + sizeof(struct nf_conn_nat)
#endif
                + sizeof(struct nf_conn_seqadj)
                + sizeof(struct nf_conn_acct)
#ifdef CONFIG_NF_CONNTRACK_EVENTS
                + sizeof(struct nf_conntrack_ecache)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
                + sizeof(struct nf_conn_tstamp)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
                + sizeof(struct nf_conn_timeout)
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
                + sizeof(struct nf_conn_labels)
#endif
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
                + sizeof(struct nf_conn_synproxy)
#endif
        ;
}

int nf_conntrack_init_start(void)
{
        unsigned long nr_pages = totalram_pages();
        int max_factor = 8;
        int ret = -ENOMEM;
        int i;

        /* struct nf_ct_ext uses u8 to store offsets/size */
        BUILD_BUG_ON(total_extension_size() > 255u);

        seqcount_spinlock_init(&nf_conntrack_generation,
                               &nf_conntrack_locks_all_lock);

        for (i = 0; i < CONNTRACK_LOCKS; i++)
                spin_lock_init(&nf_conntrack_locks[i]);

        if (!nf_conntrack_htable_size) {
                /* Idea from tcp.c: use 1/16384 of memory.
                 * On i386: 32MB machine has 512 buckets.
                 * >= 1GB machines have 16384 buckets.
                 * >= 4GB machines have 65536 buckets.
                 */
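                /* Illustrative example (not from the original source): on a
                 * 64-bit box with 4 KiB pages and 2 GiB of RAM, the formula
                 * below yields 2^31 / 16384 / 8 == 16384 buckets, and with
                 * max_factor == 4 that allows 65536 tracked connections.
                 */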
                nf_conntrack_htable_size
                        = (((nr_pages << PAGE_SHIFT) / 16384)
                           / sizeof(struct hlist_head));
                if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
                        nf_conntrack_htable_size = 65536;
                else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
                        nf_conntrack_htable_size = 16384;
                if (nf_conntrack_htable_size < 32)
                        nf_conntrack_htable_size = 32;

                /* Use a max. factor of four by default to get the same max as
                 * with the old struct list_heads. When a table size is given
                 * we use the old value of 8 to avoid reducing the max.
                 * entries. */
                max_factor = 4;
        }

        nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
        if (!nf_conntrack_hash)
                return -ENOMEM;

        nf_conntrack_max = max_factor * nf_conntrack_htable_size;

        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
                                                sizeof(struct nf_conn),
                                                NFCT_INFOMASK + 1,
                                                SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
        if (!nf_conntrack_cachep)
                goto err_cachep;

        ret = nf_conntrack_expect_init();
        if (ret < 0)
                goto err_expect;

        ret = nf_conntrack_acct_init();
        if (ret < 0)
                goto err_acct;

        ret = nf_conntrack_tstamp_init();
        if (ret < 0)
                goto err_tstamp;

        ret = nf_conntrack_ecache_init();
        if (ret < 0)
                goto err_ecache;

        ret = nf_conntrack_timeout_init();
        if (ret < 0)
                goto err_timeout;

        ret = nf_conntrack_helper_init();
        if (ret < 0)
                goto err_helper;

        ret = nf_conntrack_labels_init();
        if (ret < 0)
                goto err_labels;

        ret = nf_conntrack_seqadj_init();
        if (ret < 0)
                goto err_seqadj;

        ret = nf_conntrack_proto_init();
        if (ret < 0)
                goto err_proto;

        conntrack_gc_work_init(&conntrack_gc_work);
        queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);

        return 0;

err_proto:
        nf_conntrack_seqadj_fini();
err_seqadj:
        nf_conntrack_labels_fini();
err_labels:
        nf_conntrack_helper_fini();
err_helper:
        nf_conntrack_timeout_fini();
err_timeout:
        nf_conntrack_ecache_fini();
err_ecache:
        nf_conntrack_tstamp_fini();
err_tstamp:
        nf_conntrack_acct_fini();
err_acct:
        nf_conntrack_expect_fini();
err_expect:
        kmem_cache_destroy(nf_conntrack_cachep);
err_cachep:
        kvfree(nf_conntrack_hash);
        return ret;
}

static struct nf_ct_hook nf_conntrack_hook = {
        .update         = nf_conntrack_update,
        .destroy        = destroy_conntrack,
        .get_tuple_skb  = nf_conntrack_get_tuple_skb,
};

void nf_conntrack_init_end(void)
{
        /* For use by REJECT target */
        RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
        RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
}

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
#define DYING_NULLS_VAL         ((1<<30)+1)

int nf_conntrack_init_net(struct net *net)
{
        int ret = -ENOMEM;
        int cpu;

        BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
        BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
        atomic_set(&net->ct.count, 0);

        net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
        if (!net->ct.pcpu_lists)
                goto err_stat;

        for_each_possible_cpu(cpu) {
                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_init(&pcpu->lock);
                INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
                INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
        }

        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
        if (!net->ct.stat)
                goto err_pcpu_lists;

        ret = nf_conntrack_expect_pernet_init(net);
        if (ret < 0)
                goto err_expect;

        nf_conntrack_acct_pernet_init(net);
        nf_conntrack_tstamp_pernet_init(net);
        nf_conntrack_ecache_pernet_init(net);
        nf_conntrack_helper_pernet_init(net);
        nf_conntrack_proto_pernet_init(net);
        return 0;

err_expect:
        free_percpu(net->ct.stat);
err_pcpu_lists:
        free_percpu(net->ct.pcpu_lists);
err_stat:
        return ret;
}