ip_tables.c

/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
 *   - increase module usage count as soon as we have rules inside
 *     a table
 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
 *   - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) \
do { \
    if (!(x)) \
        printk("IP_NF_ASSERT: %s:%s:%u\n", \
               __FUNCTION__, __FILE__, __LINE__); \
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
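
/*
 * Illustrative sketch (not part of the original file): the locking
 * discipline described above, reduced to its two halves. The call
 * sites are assumptions for illustration only; the real ones are in
 * ipt_do_table() and alloc_counters() below.
 */
#if 0
/* Packet path (softirq): many CPUs may hold the read lock at once,
 * each touching only its own copy of the rules and counters. */
read_lock_bh(&table->lock);
/* ... traverse this CPU's private copy of the table ... */
read_unlock_bh(&table->lock);

/* User context (e.g. reading counters): the write lock excludes all
 * packet processing, so per-CPU counters can be summed consistently. */
write_lock_bh(&table->lock);
/* ... snapshot counters from every CPU's copy ... */
write_unlock_bh(&table->lock);
#endif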
/* Returns whether matches rule or not. */
static inline int
ip_packet_match(const struct iphdr *ip,
        const char *indev,
        const char *outdev,
        const struct ipt_ip *ipinfo,
        int isfrag)
{
    size_t i;
    unsigned long ret;

#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

    if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
          IPT_INV_SRCIP)
        || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
             IPT_INV_DSTIP)) {
        dprintf("Source or dest mismatch.\n");

        dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
            NIPQUAD(ip->saddr),
            NIPQUAD(ipinfo->smsk.s_addr),
            NIPQUAD(ipinfo->src.s_addr),
            ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
        dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
            NIPQUAD(ip->daddr),
            NIPQUAD(ipinfo->dmsk.s_addr),
            NIPQUAD(ipinfo->dst.s_addr),
            ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
        return 0;
    }

    /* Look for ifname matches; this should unroll nicely. */
    for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
        ret |= (((const unsigned long *)indev)[i]
            ^ ((const unsigned long *)ipinfo->iniface)[i])
            & ((const unsigned long *)ipinfo->iniface_mask)[i];
    }

    if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
        dprintf("VIA in mismatch (%s vs %s).%s\n",
            indev, ipinfo->iniface,
            ipinfo->invflags&IPT_INV_VIA_IN ? " (INV)" : "");
        return 0;
    }

    for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
        ret |= (((const unsigned long *)outdev)[i]
            ^ ((const unsigned long *)ipinfo->outiface)[i])
            & ((const unsigned long *)ipinfo->outiface_mask)[i];
    }

    if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
        dprintf("VIA out mismatch (%s vs %s).%s\n",
            outdev, ipinfo->outiface,
            ipinfo->invflags&IPT_INV_VIA_OUT ? " (INV)" : "");
        return 0;
    }

    /* Check specific protocol */
    if (ipinfo->proto
        && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
        dprintf("Packet protocol %hi does not match %hi.%s\n",
            ip->protocol, ipinfo->proto,
            ipinfo->invflags&IPT_INV_PROTO ? " (INV)" : "");
        return 0;
    }

    /* If we have a fragment rule but the packet is not a fragment
     * then we return zero */
    if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
        dprintf("Fragment rule but not fragment.%s\n",
            ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
        return 0;
    }

    return 1;
}
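
/*
 * Illustrative note (not part of the original file): FWINV() folds a
 * rule's "!" inversion flags into each test with a single XOR. The
 * first operand is the raw mismatch test; the second is 1 iff the
 * inversion flag is set, so the result means "this check rules the
 * packet out". A hypothetical expansion for the source-address check:
 */
#if 0
int mismatch = (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr;
int inverted = !!(ipinfo->invflags & IPT_INV_SRCIP);
/* FWINV(mismatch, IPT_INV_SRCIP) == mismatch ^ inverted:
 *   mismatch=0 inverted=0 -> 0 (address matches, plain rule: pass)
 *   mismatch=1 inverted=0 -> 1 (fail)
 *   mismatch=0 inverted=1 -> 1 (fail: "!" rule, but address matched)
 *   mismatch=1 inverted=1 -> 0 (pass: "!" rule and address differs)
 */
#endif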
static inline int
ip_checkentry(const struct ipt_ip *ip)
{
    if (ip->flags & ~IPT_F_MASK) {
        duprintf("Unknown flag bits set: %08X\n",
             ip->flags & ~IPT_F_MASK);
        return 0;
    }
    if (ip->invflags & ~IPT_INV_MASK) {
        duprintf("Unknown invflag bits set: %08X\n",
             ip->invflags & ~IPT_INV_MASK);
        return 0;
    }
    return 1;
}
static unsigned int
ipt_error(struct sk_buff **pskb,
      const struct net_device *in,
      const struct net_device *out,
      unsigned int hooknum,
      const struct xt_target *target,
      const void *targinfo)
{
    if (net_ratelimit())
        printk("ip_tables: error: `%s'\n", (char *)targinfo);

    return NF_DROP;
}

static inline
int do_match(struct ipt_entry_match *m,
         const struct sk_buff *skb,
         const struct net_device *in,
         const struct net_device *out,
         int offset,
         int *hotdrop)
{
    /* Stop iteration if it doesn't match */
    if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
                      offset, skb->nh.iph->ihl*4, hotdrop))
        return 1;
    else
        return 0;
}

static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
    return (struct ipt_entry *)(base + offset);
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
         unsigned int hook,
         const struct net_device *in,
         const struct net_device *out,
         struct xt_table *table)
{
    static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
    u_int16_t offset;
    struct iphdr *ip;
    u_int16_t datalen;
    int hotdrop = 0;
    /* Initializing verdict to NF_DROP keeps gcc happy. */
    unsigned int verdict = NF_DROP;
    const char *indev, *outdev;
    void *table_base;
    struct ipt_entry *e, *back;
    struct xt_table_info *private;

    /* Initialization */
    ip = (*pskb)->nh.iph;
    datalen = (*pskb)->len - ip->ihl * 4;
    indev = in ? in->name : nulldevname;
    outdev = out ? out->name : nulldevname;
    /* We handle fragments by dealing with the first fragment as
     * if it was a normal packet. All other fragments are treated
     * normally, except that they will NEVER match rules that ask
     * things we don't know (ie. tcp syn flag or ports). If the
     * rule is also a fragment-specific rule, non-fragments won't
     * match it. */
    offset = ntohs(ip->frag_off) & IP_OFFSET;

    read_lock_bh(&table->lock);
    IP_NF_ASSERT(table->valid_hooks & (1 << hook));
    private = table->private;
    table_base = (void *)private->entries[smp_processor_id()];
    e = get_entry(table_base, private->hook_entry[hook]);

    /* For return from builtin chain */
    back = get_entry(table_base, private->underflow[hook]);

    do {
        IP_NF_ASSERT(e);
        IP_NF_ASSERT(back);
        if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
            struct ipt_entry_target *t;

            if (IPT_MATCH_ITERATE(e, do_match,
                          *pskb, in, out,
                          offset, &hotdrop) != 0)
                goto no_match;

            ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

            t = ipt_get_target(e);
            IP_NF_ASSERT(t->u.kernel.target);
            /* Standard target? */
            if (!t->u.kernel.target->target) {
                int v;

                v = ((struct ipt_standard_target *)t)->verdict;
                if (v < 0) {
                    /* Pop from stack? */
                    if (v != IPT_RETURN) {
                        verdict = (unsigned)(-v) - 1;
                        break;
                    }
                    e = back;
                    back = get_entry(table_base,
                             back->comefrom);
                    continue;
                }
                if (table_base + v != (void *)e + e->next_offset
                    && !(e->ip.flags & IPT_F_GOTO)) {
                    /* Save old back ptr in next entry */
                    struct ipt_entry *next
                        = (void *)e + e->next_offset;
                    next->comefrom
                        = (void *)back - table_base;
                    /* set back pointer to next entry */
                    back = next;
                }

                e = get_entry(table_base, v);
            } else {
                /* Targets which reenter must return
                   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
                ((struct ipt_entry *)table_base)->comefrom
                    = 0xeeeeeeec;
#endif
                verdict = t->u.kernel.target->target(pskb,
                                     in, out,
                                     hook,
                                     t->u.kernel.target,
                                     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
                if (((struct ipt_entry *)table_base)->comefrom
                    != 0xeeeeeeec
                    && verdict == IPT_CONTINUE) {
                    printk("Target %s reentered!\n",
                           t->u.kernel.target->name);
                    verdict = NF_DROP;
                }
                ((struct ipt_entry *)table_base)->comefrom
                    = 0x57acc001;
#endif
                /* Target might have changed stuff. */
                ip = (*pskb)->nh.iph;
                datalen = (*pskb)->len - ip->ihl * 4;

                if (verdict == IPT_CONTINUE)
                    e = (void *)e + e->next_offset;
                else
                    /* Verdict */
                    break;
            }
        } else {

        no_match:
            e = (void *)e + e->next_offset;
        }
    } while (!hotdrop);

    read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
    return NF_ACCEPT;
#else
    if (hotdrop)
        return NF_DROP;
    else return verdict;
#endif
}
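
/*
 * Illustrative note (not part of the original file): standard-target
 * verdicts are encoded by userspace as negative numbers so they can
 * share the verdict field with jump offsets (which are >= 0). A
 * netfilter verdict V is stored as -V - 1 and decoded above by
 * "verdict = (unsigned)(-v) - 1". A hypothetical round-trip:
 */
#if 0
int v = -NF_ACCEPT - 1;                    /* stored in the rule: -2   */
unsigned int verdict = (unsigned)(-v) - 1; /* decoded: 1 == NF_ACCEPT  */
#endif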
/* All zeroes == unconditional rule. */
static inline int
unconditional(const struct ipt_ip *ip)
{
    unsigned int i;

    for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
        if (((__u32 *)ip)[i])
            return 0;

    return 1;
}
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(struct xt_table_info *newinfo,
           unsigned int valid_hooks, void *entry0)
{
    unsigned int hook;

    /* No recursion; use packet counter to save back ptrs (reset
       to 0 as we leave), and comefrom to save source hook bitmask */
    for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
        unsigned int pos = newinfo->hook_entry[hook];
        struct ipt_entry *e
            = (struct ipt_entry *)(entry0 + pos);

        if (!(valid_hooks & (1 << hook)))
            continue;

        /* Set initial back pointer. */
        e->counters.pcnt = pos;

        for (;;) {
            struct ipt_standard_target *t
                = (void *)ipt_get_target(e);
            int visited = e->comefrom & (1 << hook);

            if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
                printk("iptables: loop hook %u pos %u %08X.\n",
                       hook, pos, e->comefrom);
                return 0;
            }
            e->comefrom
                |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

            /* Unconditional return/END. */
            if ((e->target_offset == sizeof(struct ipt_entry)
                 && (strcmp(t->target.u.user.name,
                    IPT_STANDARD_TARGET) == 0)
                 && t->verdict < 0
                 && unconditional(&e->ip)) || visited) {
                unsigned int oldpos, size;

                if (t->verdict < -NF_MAX_VERDICT - 1) {
                    duprintf("mark_source_chains: bad "
                         "negative verdict (%i)\n",
                         t->verdict);
                    return 0;
                }

                /* Return: backtrack through the last
                   big jump. */
                do {
                    e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
                    if (e->comefrom
                        & (1 << NF_IP_NUMHOOKS)) {
                        duprintf("Back unset "
                             "on hook %u "
                             "rule %u\n",
                             hook, pos);
                    }
#endif
                    oldpos = pos;
                    pos = e->counters.pcnt;
                    e->counters.pcnt = 0;

                    /* We're at the start. */
                    if (pos == oldpos)
                        goto next;

                    e = (struct ipt_entry *)
                        (entry0 + pos);
                } while (oldpos == pos + e->next_offset);

                /* Move along one */
                size = e->next_offset;
                e = (struct ipt_entry *)
                    (entry0 + pos + size);
                e->counters.pcnt = pos;
                pos += size;
            } else {
                int newpos = t->verdict;

                if (strcmp(t->target.u.user.name,
                       IPT_STANDARD_TARGET) == 0
                    && newpos >= 0) {
                    if (newpos > newinfo->size -
                        sizeof(struct ipt_entry)) {
                        duprintf("mark_source_chains: "
                             "bad verdict (%i)\n",
                             newpos);
                        return 0;
                    }
                    /* This is a jump; chase it. */
                    duprintf("Jump rule %u -> %u\n",
                         pos, newpos);
                } else {
                    /* ... this is a fallthru */
                    newpos = pos + e->next_offset;
                }
                e = (struct ipt_entry *)
                    (entry0 + newpos);
                e->counters.pcnt = pos;
                pos = newpos;
            }
        }
        next:
        duprintf("Finished chain %u\n", hook);
    }
    return 1;
}
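
/*
 * Illustrative note (not part of the original file): the walk above is
 * an iterative depth-first search. Each entry's counters.pcnt field
 * temporarily stores the "return address" (where we jumped from), and
 * comefrom collects one bit per hook that can reach the rule plus bit
 * NF_IP_NUMHOOKS as an "on the current path" marker. A hypothetical
 * looping ruleset, as a worked example:
 *
 *   pos 0:   jump to pos 100  (user chain entry)
 *   pos 100: jump to pos 100  (chain jumps back into itself)
 *
 * On the second arrival at pos 100 the path marker is still set in
 * comefrom, so the function prints "iptables: loop hook ..." and
 * returns 0, which translate_table() turns into -ELOOP.
 */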
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
    if (i && (*i)-- == 0)
        return 1;

    if (m->u.kernel.match->destroy)
        m->u.kernel.match->destroy(m->u.kernel.match, m->data);
    module_put(m->u.kernel.match->me);
    return 0;
}

static inline int
check_entry(struct ipt_entry *e, const char *name)
{
    struct ipt_entry_target *t;

    if (!ip_checkentry(&e->ip)) {
        duprintf("ip_tables: ip check failed %p %s.\n", e, name);
        return -EINVAL;
    }

    if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
        return -EINVAL;

    t = ipt_get_target(e);
    if (e->target_offset + t->u.target_size > e->next_offset)
        return -EINVAL;

    return 0;
}
static inline int check_match(struct ipt_entry_match *m, const char *name,
                  const struct ipt_ip *ip, unsigned int hookmask)
{
    struct xt_match *match;
    int ret;

    match = m->u.kernel.match;
    ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
                 name, hookmask, ip->proto,
                 ip->invflags & IPT_INV_PROTO);
    if (!ret && m->u.kernel.match->checkentry
        && !m->u.kernel.match->checkentry(name, ip, match, m->data,
                          hookmask)) {
        duprintf("ip_tables: check failed for `%s'.\n",
             m->u.kernel.match->name);
        ret = -EINVAL;
    }
    return ret;
}

static inline int
find_check_match(struct ipt_entry_match *m,
         const char *name,
         const struct ipt_ip *ip,
         unsigned int hookmask,
         unsigned int *i)
{
    struct xt_match *match;
    int ret;

    match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                              m->u.user.revision),
                    "ipt_%s", m->u.user.name);
    if (IS_ERR(match) || !match) {
        duprintf("find_check_match: `%s' not found\n", m->u.user.name);
        return match ? PTR_ERR(match) : -ENOENT;
    }
    m->u.kernel.match = match;

    ret = check_match(m, name, ip, hookmask);
    if (ret)
        goto err;

    (*i)++;
    return 0;
err:
    module_put(m->u.kernel.match->me);
    return ret;
}
static inline int check_target(struct ipt_entry *e, const char *name)
{
    struct ipt_entry_target *t;
    struct xt_target *target;
    int ret;

    t = ipt_get_target(e);
    target = t->u.kernel.target;
    ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
                  name, e->comefrom, e->ip.proto,
                  e->ip.invflags & IPT_INV_PROTO);
    if (!ret && t->u.kernel.target->checkentry
        && !t->u.kernel.target->checkentry(name, e, target,
                           t->data, e->comefrom)) {
        duprintf("ip_tables: check failed for `%s'.\n",
             t->u.kernel.target->name);
        ret = -EINVAL;
    }
    return ret;
}

static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
         unsigned int *i)
{
    struct ipt_entry_target *t;
    struct xt_target *target;
    int ret;
    unsigned int j;

    ret = check_entry(e, name);
    if (ret)
        return ret;

    j = 0;
    ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
                e->comefrom, &j);
    if (ret != 0)
        goto cleanup_matches;

    t = ipt_get_target(e);
    target = try_then_request_module(xt_find_target(AF_INET,
                            t->u.user.name,
                            t->u.user.revision),
                     "ipt_%s", t->u.user.name);
    if (IS_ERR(target) || !target) {
        duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
        ret = target ? PTR_ERR(target) : -ENOENT;
        goto cleanup_matches;
    }
    t->u.kernel.target = target;

    ret = check_target(e, name);
    if (ret)
        goto err;

    (*i)++;
    return 0;
err:
    module_put(t->u.kernel.target->me);
cleanup_matches:
    IPT_MATCH_ITERATE(e, cleanup_match, &j);
    return ret;
}
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
               struct xt_table_info *newinfo,
               unsigned char *base,
               unsigned char *limit,
               const unsigned int *hook_entries,
               const unsigned int *underflows,
               unsigned int *i)
{
    unsigned int h;

    if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
        || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
        duprintf("Bad offset %p\n", e);
        return -EINVAL;
    }

    if (e->next_offset
        < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
        duprintf("checking: element %p size %u\n",
             e, e->next_offset);
        return -EINVAL;
    }

    /* Check hooks & underflows */
    for (h = 0; h < NF_IP_NUMHOOKS; h++) {
        if ((unsigned char *)e - base == hook_entries[h])
            newinfo->hook_entry[h] = hook_entries[h];
        if ((unsigned char *)e - base == underflows[h])
            newinfo->underflow[h] = underflows[h];
    }

    /* FIXME: underflows must be unconditional, standard verdicts
       < 0 (not IPT_RETURN). --RR */

    /* Clear counters and comefrom */
    e->counters = ((struct xt_counters) { 0, 0 });
    e->comefrom = 0;

    (*i)++;
    return 0;
}

static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
    struct ipt_entry_target *t;

    if (i && (*i)-- == 0)
        return 1;

    /* Cleanup all matches */
    IPT_MATCH_ITERATE(e, cleanup_match, NULL);
    t = ipt_get_target(e);
    if (t->u.kernel.target->destroy)
        t->u.kernel.target->destroy(t->u.kernel.target, t->data);
    module_put(t->u.kernel.target->me);
    return 0;
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(const char *name,
        unsigned int valid_hooks,
        struct xt_table_info *newinfo,
        void *entry0,
        unsigned int size,
        unsigned int number,
        const unsigned int *hook_entries,
        const unsigned int *underflows)
{
    unsigned int i;
    int ret;

    newinfo->size = size;
    newinfo->number = number;

    /* Init all hooks to impossible value. */
    for (i = 0; i < NF_IP_NUMHOOKS; i++) {
        newinfo->hook_entry[i] = 0xFFFFFFFF;
        newinfo->underflow[i] = 0xFFFFFFFF;
    }

    duprintf("translate_table: size %u\n", newinfo->size);
    i = 0;
    /* Walk through entries, checking offsets. */
    ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                check_entry_size_and_hooks,
                newinfo,
                entry0,
                entry0 + size,
                hook_entries, underflows, &i);
    if (ret != 0)
        return ret;

    if (i != number) {
        duprintf("translate_table: %u not %u entries\n",
             i, number);
        return -EINVAL;
    }

    /* Check hooks all assigned */
    for (i = 0; i < NF_IP_NUMHOOKS; i++) {
        /* Only hooks which are valid */
        if (!(valid_hooks & (1 << i)))
            continue;
        if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
            duprintf("Invalid hook entry %u %u\n",
                 i, hook_entries[i]);
            return -EINVAL;
        }
        if (newinfo->underflow[i] == 0xFFFFFFFF) {
            duprintf("Invalid underflow %u %u\n",
                 i, underflows[i]);
            return -EINVAL;
        }
    }

    if (!mark_source_chains(newinfo, valid_hooks, entry0))
        return -ELOOP;

    /* Finally, each sanity check must pass */
    i = 0;
    ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
                find_check_entry, name, size, &i);
    if (ret != 0) {
        IPT_ENTRY_ITERATE(entry0, newinfo->size,
                  cleanup_entry, &i);
        return ret;
    }

    /* And one copy for every other CPU */
    for_each_possible_cpu(i) {
        if (newinfo->entries[i] && newinfo->entries[i] != entry0)
            memcpy(newinfo->entries[i], entry0, newinfo->size);
    }

    return ret;
}
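
/*
 * Illustrative summary (not part of the original file) of the checks
 * translate_table() performs, in order:
 *   1. every entry is size/alignment-checked and hook/underflow
 *      offsets are recorded (check_entry_size_and_hooks);
 *   2. the entry count must match what userspace declared;
 *   3. each valid hook needs both an entry point and an underflow;
 *   4. mark_source_chains() proves the ruleset is loop-free (-ELOOP);
 *   5. find_check_entry() binds and validates each match and target;
 *   6. the verified blob is duplicated into every other CPU's copy.
 */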
/* Gets counters. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
             struct xt_counters total[],
             unsigned int *i)
{
    ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

    (*i)++;
    return 0;
}

static inline int
set_entry_to_counter(const struct ipt_entry *e,
             struct ipt_counters total[],
             unsigned int *i)
{
    SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

    (*i)++;
    return 0;
}

static void
get_counters(const struct xt_table_info *t,
         struct xt_counters counters[])
{
    unsigned int cpu;
    unsigned int i;
    unsigned int curcpu;

    /* Instead of clearing (by a previous call to memset())
     * the counters and using adds, we set the counters
     * with data used by the 'current' CPU.
     * We don't care about preemption here.
     */
    curcpu = raw_smp_processor_id();

    i = 0;
    IPT_ENTRY_ITERATE(t->entries[curcpu],
              t->size,
              set_entry_to_counter,
              counters,
              &i);

    for_each_possible_cpu(cpu) {
        if (cpu == curcpu)
            continue;
        i = 0;
        IPT_ENTRY_ITERATE(t->entries[cpu],
                  t->size,
                  add_entry_to_counter,
                  counters,
                  &i);
    }
}
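
/*
 * Illustrative worked example (not part of the original file): counters
 * are kept per CPU, so the user-visible total is seeded from the
 * current CPU's copy (SET_COUNTER) and every other CPU's copy is added
 * on top (ADD_COUNTER). With two CPUs and one rule, hypothetically:
 *
 *   CPU0 copy: 10 packets / 1000 bytes
 *   CPU1 copy:  4 packets /  400 bytes
 *
 * get_counters() leaves 14 packets / 1400 bytes in counters[0].
 */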
static inline struct xt_counters *alloc_counters(struct xt_table *table)
{
    unsigned int countersize;
    struct xt_counters *counters;
    struct xt_table_info *private = table->private;

    /* We need atomic snapshot of counters: rest doesn't change
       (other than comefrom, which userspace doesn't care
       about). */
    countersize = sizeof(struct xt_counters) * private->number;
    counters = vmalloc_node(countersize, numa_node_id());

    if (counters == NULL)
        return ERR_PTR(-ENOMEM);

    /* First, sum counters... */
    write_lock_bh(&table->lock);
    get_counters(private, counters);
    write_unlock_bh(&table->lock);

    return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
             struct xt_table *table,
             void __user *userptr)
{
    unsigned int off, num;
    struct ipt_entry *e;
    struct xt_counters *counters;
    struct xt_table_info *private = table->private;
    int ret = 0;
    void *loc_cpu_entry;

    counters = alloc_counters(table);
    if (IS_ERR(counters))
        return PTR_ERR(counters);

    /* choose the copy that is on our node/cpu, ...
     * This choice is lazy (because current thread is
     * allowed to migrate to another cpu)
     */
    loc_cpu_entry = private->entries[raw_smp_processor_id()];
    /* ... then copy entire thing ... */
    if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
        ret = -EFAULT;
        goto free_counters;
    }

    /* FIXME: use iterator macros --RR */
    /* ... then go back and fix counters and names */
    for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
        unsigned int i;
        struct ipt_entry_match *m;
        struct ipt_entry_target *t;

        e = (struct ipt_entry *)(loc_cpu_entry + off);
        if (copy_to_user(userptr + off
                 + offsetof(struct ipt_entry, counters),
                 &counters[num],
                 sizeof(counters[num])) != 0) {
            ret = -EFAULT;
            goto free_counters;
        }

        for (i = sizeof(struct ipt_entry);
             i < e->target_offset;
             i += m->u.match_size) {
            m = (void *)e + i;

            if (copy_to_user(userptr + off + i
                     + offsetof(struct ipt_entry_match,
                            u.user.name),
                     m->u.kernel.match->name,
                     strlen(m->u.kernel.match->name)+1)
                != 0) {
                ret = -EFAULT;
                goto free_counters;
            }
        }

        t = ipt_get_target(e);
        if (copy_to_user(userptr + off + e->target_offset
                 + offsetof(struct ipt_entry_target,
                        u.user.name),
                 t->u.kernel.target->name,
                 strlen(t->u.kernel.target->name)+1) != 0) {
            ret = -EFAULT;
            goto free_counters;
        }
    }

free_counters:
    vfree(counters);
    return ret;
}
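
/*
 * Illustrative note (not part of the original file): the blob copied to
 * userspace is a sequence of variable-length entries, which is why the
 * loop above steps through it via each entry's next_offset and then
 * patches in the freshly summed counters and the user-visible match and
 * target names. One entry's layout inside the blob, schematically:
 *
 *   off                          off+target_offset      off+next_offset
 *    |<-- struct ipt_entry -->|<-- matches -->|<------ target ------>|
 *
 * Both offsets are relative to the start of the entry.
 */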
#ifdef CONFIG_COMPAT
struct compat_delta {
    struct compat_delta *next;
    unsigned int offset;
    short delta;
};

static struct compat_delta *compat_offsets = NULL;

static int compat_add_offset(unsigned int offset, short delta)
{
    struct compat_delta *tmp;

    tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
    if (!tmp)
        return -ENOMEM;
    tmp->offset = offset;
    tmp->delta = delta;
    if (compat_offsets) {
        tmp->next = compat_offsets->next;
        compat_offsets->next = tmp;
    } else {
        compat_offsets = tmp;
        tmp->next = NULL;
    }
    return 0;
}

static void compat_flush_offsets(void)
{
    struct compat_delta *tmp, *next;

    if (compat_offsets) {
        for (tmp = compat_offsets; tmp; tmp = next) {
            next = tmp->next;
            kfree(tmp);
        }
        compat_offsets = NULL;
    }
}

static short compat_calc_jump(unsigned int offset)
{
    struct compat_delta *tmp;
    short delta;

    for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
        if (tmp->offset < offset)
            delta += tmp->delta;
    return delta;
}
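
/*
 * Illustrative worked example (not part of the original file): every
 * entry that shrinks when converted to the 32-bit compat layout records
 * an (offset, delta) pair via compat_add_offset(). compat_calc_jump()
 * then adjusts a jump target by the total shrinkage of all entries in
 * front of it. Hypothetically, if the entries at offsets 0 and 112 each
 * shrink by 8 bytes, a native jump target of 224 maps to the compat
 * offset 224 - (8 + 8) = 208, which is exactly the subtraction
 * compat_standard_to_user() performs below.
 */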
static void compat_standard_from_user(void *dst, void *src)
{
    int v = *(compat_int_t *)src;

    if (v > 0)
        v += compat_calc_jump(v);
    memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, void *src)
{
    compat_int_t cv = *(int *)src;

    if (cv > 0)
        cv -= compat_calc_jump(cv);
    return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static inline int
compat_calc_match(struct ipt_entry_match *m, int *size)
{
    *size += xt_compat_match_offset(m->u.kernel.match);
    return 0;
}

static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
        void *base, struct xt_table_info *newinfo)
{
    struct ipt_entry_target *t;
    unsigned int entry_offset;
    int off, i, ret;

    off = 0;
    entry_offset = (void *)e - base;
    IPT_MATCH_ITERATE(e, compat_calc_match, &off);
    t = ipt_get_target(e);
    off += xt_compat_target_offset(t->u.kernel.target);
    newinfo->size -= off;
    ret = compat_add_offset(entry_offset, off);
    if (ret)
        return ret;

    for (i = 0; i < NF_IP_NUMHOOKS; i++) {
        if (info->hook_entry[i] && (e < (struct ipt_entry *)
                (base + info->hook_entry[i])))
            newinfo->hook_entry[i] -= off;
        if (info->underflow[i] && (e < (struct ipt_entry *)
                (base + info->underflow[i])))
            newinfo->underflow[i] -= off;
    }
    return 0;
}

static int compat_table_info(struct xt_table_info *info,
                 struct xt_table_info *newinfo)
{
    void *loc_cpu_entry;
    int i;

    if (!newinfo || !info)
        return -EINVAL;

    memset(newinfo, 0, sizeof(struct xt_table_info));
    newinfo->size = info->size;
    newinfo->number = info->number;
    for (i = 0; i < NF_IP_NUMHOOKS; i++) {
        newinfo->hook_entry[i] = info->hook_entry[i];
        newinfo->underflow[i] = info->underflow[i];
    }
    loc_cpu_entry = info->entries[raw_smp_processor_id()];
    return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
            compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif
static int get_info(void __user *user, int *len, int compat)
{
    char name[IPT_TABLE_MAXNAMELEN];
    struct xt_table *t;
    int ret;

    if (*len != sizeof(struct ipt_getinfo)) {
        duprintf("length %u != %u\n", *len,
             (unsigned int)sizeof(struct ipt_getinfo));
        return -EINVAL;
    }

    if (copy_from_user(name, user, sizeof(name)) != 0)
        return -EFAULT;

    name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
    if (compat)
        xt_compat_lock(AF_INET);
#endif
    t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                    "iptable_%s", name);
    if (t && !IS_ERR(t)) {
        struct ipt_getinfo info;
        struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
        if (compat) {
            struct xt_table_info tmp;
            ret = compat_table_info(private, &tmp);
            compat_flush_offsets();
            private = &tmp;
        }
#endif
        info.valid_hooks = t->valid_hooks;
        memcpy(info.hook_entry, private->hook_entry,
               sizeof(info.hook_entry));
        memcpy(info.underflow, private->underflow,
               sizeof(info.underflow));
        info.num_entries = private->number;
        info.size = private->size;
        strcpy(info.name, name);

        if (copy_to_user(user, &info, *len) != 0)
            ret = -EFAULT;
        else
            ret = 0;

        xt_table_unlock(t);
        module_put(t->me);
    } else
        ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
    if (compat)
        xt_compat_unlock(AF_INET);
#endif
    return ret;
}
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
    int ret;
    struct ipt_get_entries get;
    struct xt_table *t;

    if (*len < sizeof(get)) {
        duprintf("get_entries: %u < %d\n", *len,
             (unsigned int)sizeof(get));
        return -EINVAL;
    }
    if (copy_from_user(&get, uptr, sizeof(get)) != 0)
        return -EFAULT;
    if (*len != sizeof(struct ipt_get_entries) + get.size) {
        duprintf("get_entries: %u != %u\n", *len,
             (unsigned int)(sizeof(struct ipt_get_entries) +
             get.size));
        return -EINVAL;
    }

    t = xt_find_table_lock(AF_INET, get.name);
    if (t && !IS_ERR(t)) {
        struct xt_table_info *private = t->private;
        duprintf("t->private->number = %u\n",
             private->number);
        if (get.size == private->size)
            ret = copy_entries_to_user(private->size,
                           t, uptr->entrytable);
        else {
            duprintf("get_entries: I've got %u not %u!\n",
                 private->size,
                 get.size);
            ret = -EINVAL;
        }
        module_put(t->me);
        xt_table_unlock(t);
    } else
        ret = t ? PTR_ERR(t) : -ENOENT;

    return ret;
}
static int
__do_replace(const char *name, unsigned int valid_hooks,
         struct xt_table_info *newinfo, unsigned int num_counters,
         void __user *counters_ptr)
{
    int ret;
    struct xt_table *t;
    struct xt_table_info *oldinfo;
    struct xt_counters *counters;
    void *loc_cpu_old_entry;

    ret = 0;
    counters = vmalloc(num_counters * sizeof(struct xt_counters));
    if (!counters) {
        ret = -ENOMEM;
        goto out;
    }

    t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                    "iptable_%s", name);
    if (!t || IS_ERR(t)) {
        ret = t ? PTR_ERR(t) : -ENOENT;
        goto free_newinfo_counters_untrans;
    }

    /* You lied! */
    if (valid_hooks != t->valid_hooks) {
        duprintf("Valid hook crap: %08X vs %08X\n",
             valid_hooks, t->valid_hooks);
        ret = -EINVAL;
        goto put_module;
    }

    oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
    if (!oldinfo)
        goto put_module;

    /* Update module usage count based on number of rules */
    duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
         oldinfo->number, oldinfo->initial_entries, newinfo->number);
    if ((oldinfo->number > oldinfo->initial_entries) ||
        (newinfo->number <= oldinfo->initial_entries))
        module_put(t->me);
    if ((oldinfo->number > oldinfo->initial_entries) &&
        (newinfo->number <= oldinfo->initial_entries))
        module_put(t->me);

    /* Get the old counters. */
    get_counters(oldinfo, counters);
    /* Decrease module usage counts and free resource */
    loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
    IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, NULL);
    xt_free_table_info(oldinfo);
    if (copy_to_user(counters_ptr, counters,
             sizeof(struct xt_counters) * num_counters) != 0)
        ret = -EFAULT;
    vfree(counters);
    xt_table_unlock(t);
    return ret;

put_module:
    module_put(t->me);
    xt_table_unlock(t);
free_newinfo_counters_untrans:
    vfree(counters);
out:
    return ret;
}
static int
do_replace(void __user *user, unsigned int len)
{
    int ret;
    struct ipt_replace tmp;
    struct xt_table_info *newinfo;
    void *loc_cpu_entry;

    if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
        return -EFAULT;

    /* Hack: Causes ipchains to give correct error msg --RR */
    if (len != sizeof(tmp) + tmp.size)
        return -ENOPROTOOPT;

    /* overflow check */
    if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
            SMP_CACHE_BYTES)
        return -ENOMEM;
    if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
        return -ENOMEM;

    newinfo = xt_alloc_table_info(tmp.size);
    if (!newinfo)
        return -ENOMEM;

    /* choose the copy that is our node/cpu */
    loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
    if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
               tmp.size) != 0) {
        ret = -EFAULT;
        goto free_newinfo;
    }

    ret = translate_table(tmp.name, tmp.valid_hooks,
                  newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
                  tmp.hook_entry, tmp.underflow);
    if (ret != 0)
        goto free_newinfo;

    duprintf("ip_tables: Translated table\n");

    ret = __do_replace(tmp.name, tmp.valid_hooks,
               newinfo, tmp.num_counters,
               tmp.counters);
    if (ret)
        goto free_newinfo_untrans;
    return 0;

free_newinfo_untrans:
    IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
free_newinfo:
    xt_free_table_info(newinfo);
    return ret;
}
/* We're lazy, and add to the first CPU; overflow works its fey magic
 * and everything is OK. */
static inline int
add_counter_to_entry(struct ipt_entry *e,
             const struct xt_counters addme[],
             unsigned int *i)
{
#if 0
    duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
         *i,
         (long unsigned int)e->counters.pcnt,
         (long unsigned int)e->counters.bcnt,
         (long unsigned int)addme[*i].pcnt,
         (long unsigned int)addme[*i].bcnt);
#endif

    ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);

    (*i)++;
    return 0;
}
static int
do_add_counters(void __user *user, unsigned int len, int compat)
{
    unsigned int i;
    struct xt_counters_info tmp;
    struct xt_counters *paddc;
    unsigned int num_counters;
    char *name;
    int size;
    void *ptmp;
    struct xt_table *t;
    struct xt_table_info *private;
    int ret = 0;
    void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
    struct compat_xt_counters_info compat_tmp;

    if (compat) {
        ptmp = &compat_tmp;
        size = sizeof(struct compat_xt_counters_info);
    } else
#endif
    {
        ptmp = &tmp;
        size = sizeof(struct xt_counters_info);
    }

    if (copy_from_user(ptmp, user, size) != 0)
        return -EFAULT;

#ifdef CONFIG_COMPAT
    if (compat) {
        num_counters = compat_tmp.num_counters;
        name = compat_tmp.name;
    } else
#endif
    {
        num_counters = tmp.num_counters;
        name = tmp.name;
    }

    if (len != size + num_counters * sizeof(struct xt_counters))
        return -EINVAL;

    paddc = vmalloc_node(len - size, numa_node_id());
    if (!paddc)
        return -ENOMEM;

    if (copy_from_user(paddc, user + size, len - size) != 0) {
        ret = -EFAULT;
        goto free;
    }

    t = xt_find_table_lock(AF_INET, name);
    if (!t || IS_ERR(t)) {
        ret = t ? PTR_ERR(t) : -ENOENT;
        goto free;
    }

    write_lock_bh(&t->lock);
    private = t->private;
    if (private->number != num_counters) {
        ret = -EINVAL;
        goto unlock_up_free;
    }

    i = 0;
    /* Choose the copy that is on our node */
    loc_cpu_entry = private->entries[raw_smp_processor_id()];
    IPT_ENTRY_ITERATE(loc_cpu_entry,
              private->size,
              add_counter_to_entry,
              paddc,
              &i);
unlock_up_free:
    write_unlock_bh(&t->lock);
    xt_table_unlock(t);
    module_put(t->me);
free:
    vfree(paddc);

    return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
    char name[IPT_TABLE_MAXNAMELEN];
    u32 valid_hooks;
    u32 num_entries;
    u32 size;
    u32 hook_entry[NF_IP_NUMHOOKS];
    u32 underflow[NF_IP_NUMHOOKS];
    u32 num_counters;
    compat_uptr_t counters;    /* struct ipt_counters * */
    struct compat_ipt_entry entries[0];
};

static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
        void __user **dstptr, compat_uint_t *size)
{
    return xt_compat_match_to_user(m, dstptr, size);
}

static int compat_copy_entry_to_user(struct ipt_entry *e,
        void __user **dstptr, compat_uint_t *size)
{
    struct ipt_entry_target *t;
    struct compat_ipt_entry __user *ce;
    u_int16_t target_offset, next_offset;
    compat_uint_t origsize;
    int ret;

    ret = -EFAULT;
    origsize = *size;
    ce = (struct compat_ipt_entry __user *)*dstptr;
    if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
        goto out;

    *dstptr += sizeof(struct compat_ipt_entry);
    ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
    target_offset = e->target_offset - (origsize - *size);
    if (ret)
        goto out;
    t = ipt_get_target(e);
    ret = xt_compat_target_to_user(t, dstptr, size);
    if (ret)
        goto out;
    ret = -EFAULT;
    next_offset = e->next_offset - (origsize - *size);
    if (put_user(target_offset, &ce->target_offset))
        goto out;
    if (put_user(next_offset, &ce->next_offset))
        goto out;
    return 0;
out:
    return ret;
}
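
/*
 * Illustrative note (not part of the original file): as each match and
 * the target are converted above, *size drops by that structure's
 * compat offset, so (origsize - *size) is the shrinkage accumulated so
 * far within this entry. target_offset and next_offset are rewritten by
 * exactly that amount so they stay consistent inside the compat blob.
 */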
static inline int
compat_check_calc_match(struct ipt_entry_match *m,
        const char *name,
        const struct ipt_ip *ip,
        unsigned int hookmask,
        int *size, int *i)
{
    struct xt_match *match;

    match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                              m->u.user.revision),
                    "ipt_%s", m->u.user.name);
    if (IS_ERR(match) || !match) {
        duprintf("compat_check_calc_match: `%s' not found\n",
             m->u.user.name);
        return match ? PTR_ERR(match) : -ENOENT;
    }
    m->u.kernel.match = match;
    *size += xt_compat_match_offset(match);

    (*i)++;
    return 0;
}

static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
            struct xt_table_info *newinfo,
            unsigned int *size,
            unsigned char *base,
            unsigned char *limit,
            unsigned int *hook_entries,
            unsigned int *underflows,
            unsigned int *i,
            const char *name)
{
    struct ipt_entry_target *t;
    struct xt_target *target;
    unsigned int entry_offset;
    int ret, off, h, j;

    duprintf("check_compat_entry_size_and_hooks %p\n", e);
    if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
        || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
        duprintf("Bad offset %p, limit = %p\n", e, limit);
        return -EINVAL;
    }

    if (e->next_offset < sizeof(struct compat_ipt_entry) +
            sizeof(struct compat_xt_entry_target)) {
        duprintf("checking: element %p size %u\n",
             e, e->next_offset);
        return -EINVAL;
    }

    ret = check_entry(e, name);
    if (ret)
        return ret;

    off = 0;
    entry_offset = (void *)e - (void *)base;
    j = 0;
    ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
                e->comefrom, &off, &j);
    if (ret != 0)
        goto cleanup_matches;

    t = ipt_get_target(e);
    target = try_then_request_module(xt_find_target(AF_INET,
                            t->u.user.name,
                            t->u.user.revision),
                     "ipt_%s", t->u.user.name);
    if (IS_ERR(target) || !target) {
        duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
             t->u.user.name);
        ret = target ? PTR_ERR(target) : -ENOENT;
        goto cleanup_matches;
    }
    t->u.kernel.target = target;

    off += xt_compat_target_offset(target);
    *size += off;
    ret = compat_add_offset(entry_offset, off);
    if (ret)
        goto out;

    /* Check hooks & underflows */
    for (h = 0; h < NF_IP_NUMHOOKS; h++) {
        if ((unsigned char *)e - base == hook_entries[h])
            newinfo->hook_entry[h] = hook_entries[h];
        if ((unsigned char *)e - base == underflows[h])
            newinfo->underflow[h] = underflows[h];
    }

    /* Clear counters and comefrom */
    e->counters = ((struct ipt_counters) { 0, 0 });
    e->comefrom = 0;

    (*i)++;
    return 0;

out:
    module_put(t->u.kernel.target->me);
cleanup_matches:
    IPT_MATCH_ITERATE(e, cleanup_match, &j);
    return ret;
}
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
    void **dstptr, compat_uint_t *size, const char *name,
    const struct ipt_ip *ip, unsigned int hookmask)
{
    xt_compat_match_from_user(m, dstptr, size);
    return 0;
}

static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
    unsigned int *size, const char *name,
    struct xt_table_info *newinfo, unsigned char *base)
{
    struct ipt_entry_target *t;
    struct xt_target *target;
    struct ipt_entry *de;
    unsigned int origsize;
    int ret, h;

    ret = 0;
    origsize = *size;
    de = (struct ipt_entry *)*dstptr;
    memcpy(de, e, sizeof(struct ipt_entry));

    *dstptr += sizeof(struct compat_ipt_entry);
    ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
                name, &de->ip, de->comefrom);
    if (ret)
        return ret;
    de->target_offset = e->target_offset - (origsize - *size);
    t = ipt_get_target(e);
    target = t->u.kernel.target;
    xt_compat_target_from_user(t, dstptr, size);

    de->next_offset = e->next_offset - (origsize - *size);
    for (h = 0; h < NF_IP_NUMHOOKS; h++) {
        if ((unsigned char *)de - base < newinfo->hook_entry[h])
            newinfo->hook_entry[h] -= origsize - *size;
        if ((unsigned char *)de - base < newinfo->underflow[h])
            newinfo->underflow[h] -= origsize - *size;
    }
    return ret;
}

static inline int compat_check_entry(struct ipt_entry *e, const char *name)
{
    int ret;

    ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom);
    if (ret)
        return ret;

    return check_target(e, name);
}
static int
translate_compat_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info **pinfo,
		void **pentry0,
		unsigned int total_size,
		unsigned int number,
		unsigned int *hook_entries,
		unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				check_compat_entry_size_and_hooks,
				info, &size, entry0,
				entry0 + total_size,
				hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
				compat_copy_entry_from_user, &pos, &size,
				name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name);
	if (ret)
		goto free_newinfo;

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
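
/*
 * Compat counterpart of do_replace(): read the 32-bit replace header,
 * pull in the ruleset blob, translate it to native layout and hand the
 * result to __do_replace(), which swaps in the new table and returns the
 * old counters to userspace.
 */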
static int
compat_do_replace(void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* Hack: Causes ipchains to give correct error msg --RR */
	if (len != sizeof(tmp) + tmp.size)
		return -ENOPROTOOPT;

	/* overflow check */
	if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
			SMP_CACHE_BYTES)
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(tmp.name, tmp.valid_hooks,
			   newinfo, tmp.num_counters,
			   compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

free_newinfo_untrans:
	IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
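
/* setsockopt() entry point for 32-bit userland on a 64-bit kernel. */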
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 1);
		break;

	default:
		duprintf("compat_do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
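
/* Layout of struct ipt_get_entries as seen by 32-bit userland. */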
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
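
/*
 * Dump the ruleset back to userspace in compat layout: first stream the
 * converted entries, then walk the user buffer again to patch in the
 * counters and the user-visible match/target names.
 */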
static int compat_copy_entries_to_user(unsigned int total_size,
			struct xt_table *table, void __user *userptr)
{
	unsigned int off, num;
	struct compat_ipt_entry e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because the current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user, &pos, &size);
	if (ret)
		goto free_counters;

	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
		unsigned int i;
		struct ipt_entry_match m;
		struct ipt_entry_target t;

		ret = -EFAULT;
		if (copy_from_user(&e, userptr + off,
				   sizeof(struct compat_ipt_entry)))
			goto free_counters;
		if (copy_to_user(userptr + off +
				 offsetof(struct compat_ipt_entry, counters),
				 &counters[num], sizeof(counters[num])))
			goto free_counters;

		for (i = sizeof(struct compat_ipt_entry);
		     i < e.target_offset; i += m.u.match_size) {
			if (copy_from_user(&m, userptr + off + i,
					   sizeof(struct ipt_entry_match)))
				goto free_counters;
			if (copy_to_user(userptr + off + i +
					 offsetof(struct ipt_entry_match,
						  u.user.name),
					 m.u.kernel.match->name,
					 strlen(m.u.kernel.match->name) + 1))
				goto free_counters;
		}

		if (copy_from_user(&t, userptr + off + e.target_offset,
				   sizeof(struct ipt_entry_target)))
			goto free_counters;
		if (copy_to_user(userptr + off + e.target_offset +
				 offsetof(struct ipt_entry_target,
					  u.user.name),
				 t.u.kernel.target->name,
				 strlen(t.u.kernel.target->name) + 1))
			goto free_counters;
	}
	ret = 0;

free_counters:
	vfree(counters);
	return ret;
}
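
/*
 * IPT_SO_GET_ENTRIES for compat userland: check that the size the caller
 * passed matches the table's size when converted to compat layout, then
 * copy the entries out.
 */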
static int
compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %u\n",
			 *len, (unsigned int)sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
			 get.size));
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		struct xt_table_info info;

		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EINVAL;
		}
		compat_flush_offsets();
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
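
/*
 * The compat get handler falls back to the native one for commands that
 * need no structure conversion (the revision queries), hence this
 * forward declaration.
 */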
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}

#endif /* CONFIG_COMPAT */
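
/* Native setsockopt() entry point: table replacement and counter updates. */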
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
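
/*
 * Native getsockopt() entry point: table info, entry dumps and
 * match/target revision queries (the latter may load the extension
 * module on demand via try_then_request_module()).
 */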
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
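
/*
 * Register a table with the xtables core.  The initial ruleset in @repl
 * is translated and checked exactly like one coming from userspace.
 *
 * Typical usage (a sketch modelled on table modules such as
 * iptable_filter; the structure and function names below are the
 * caller's own, not defined here):
 *
 *	static struct ipt_replace initial_table = { ... };
 *	static struct xt_table packet_filter = { ... };
 *
 *	static int __init iptable_filter_init(void)
 *	{
 *		return ipt_register_table(&packet_filter, &initial_table);
 *	}
 */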
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but don't care about preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
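
/*
 * Tear down a table registered with ipt_register_table(): unhook it from
 * the xtables core, run cleanup_entry() on every rule to drop match and
 * target module references, and free the per-cpu copies.
 */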
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code are matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
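
/*
 * The built-in "icmp" match: pull the ICMP header out of the packet
 * (hot-dropping it if the header is truncated) and compare type/code
 * against the configured range.  A test_type of 0xFF acts as a wildcard.
 */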
static int
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   int *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return 0;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = 1;
		return 0;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
		const void *info,
		const struct xt_match *match,
		void *matchinfo,
		unsigned int hook_mask)
{
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must specify no unknown invflags */
	return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target ipt_standard_target = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

static struct xt_target ipt_error_target = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
};

static struct xt_match icmp_matchstruct = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
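
/*
 * Module init: register with the xtables core, add the built-in
 * standard/error targets and the icmp match, then expose the
 * set/getsockopt interface to userspace.
 */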
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);
	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);
	xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);