xt_hashlimit.c

/* iptables match extension to limit the number of packets per second
 * separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 *
 * $Id: xt_hashlimit.c,v 1.1.1.1 2007/06/12 07:27:14 eyryu Exp $
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("iptables match for limiting per hash-bucket");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
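
/*
 * Illustrative sketch of how this match is typically driven from
 * userspace (the exact option names depend on the iptables build in
 * use, so treat the rule below as an example, not a reference):
 *
 *   iptables -A INPUT -p tcp --dport 22 \
 *            -m hashlimit --hashlimit 1/sec --hashlimit-burst 5 \
 *            --hashlimit-mode srcip --hashlimit-name ssh -j ACCEPT
 *
 * Each distinct source IP then gets its own token bucket, kept in the
 * per-table hash below and visible under /proc/net/ipt_hashlimit/ssh.
 */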

/* need to declare this at the top */
static struct proc_dir_entry *hashlimit_procdir4;
static struct proc_dir_entry *hashlimit_procdir6;
static const struct file_operations dl_file_ops;

/* hash table crap */
struct dsthash_dst {
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ip;
		struct {
			__be32 src[4];
			__be32 dst[4];
		} ip6;
	} addr;
	__be16 src_port;
	__be16 dst_port;
};

struct dsthash_ent {
	/* static / read-only parts in the beginning */
	struct hlist_node node;
	struct dsthash_dst dst;

	/* modified structure members in the end */
	unsigned long expires;		/* precalculated expiry time */
	struct {
		unsigned long prev;	/* last modification */
		u_int32_t credit;
		u_int32_t credit_cap, cost;
	} rateinfo;
};

struct xt_hashlimit_htable {
	struct hlist_node node;		/* global list of all htables */
	atomic_t use;
	int family;

	struct hashlimit_cfg cfg;	/* config */

	/* used internally */
	spinlock_t lock;		/* lock for list_head */
	u_int32_t rnd;			/* random seed for hash */
	int rnd_initialized;
	unsigned int count;		/* number entries in table */
	struct timer_list timer;	/* timer for gc */

	/* seq_file stuff */
	struct proc_dir_entry *pde;

	struct hlist_head hash[0];	/* hashtable itself */
};
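
/*
 * hash[0] is an old-style variable-length array: htable_create() below
 * allocates the struct plus cfg.size bucket heads in a single vmalloc().
 * The per-table spinlock protects the buckets and the entry count.
 */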

static DEFINE_SPINLOCK(hashlimit_lock);	/* protects htables list */
static DEFINE_MUTEX(hlimit_mutex);	/* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
static struct kmem_cache *hashlimit_cachep __read_mostly;

static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
{
	return !memcmp(&ent->dst, b, sizeof(ent->dst));
}

static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
	return jhash(dst, sizeof(*dst), ht->rnd) % ht->cfg.size;
}

static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;
	struct hlist_node *pos;
	u_int32_t hash = hash_dst(ht, dst);

	if (!hlist_empty(&ht->hash[hash])) {
		hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
			if (dst_cmp(ent, dst))
				return ent;
	}
	return NULL;
}

/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht, struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;

	/* initialize hash with random val at the time we allocate
	 * the first hashtable entry */
	if (!ht->rnd_initialized) {
		get_random_bytes(&ht->rnd, 4);
		ht->rnd_initialized = 1;
	}

	if (ht->cfg.max && ht->count >= ht->cfg.max) {
		/* FIXME: do something. question is what.. */
		if (net_ratelimit())
			printk(KERN_WARNING
				"xt_hashlimit: max count of %u reached\n",
				ht->cfg.max);
		return NULL;
	}

	ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
	if (!ent) {
		if (net_ratelimit())
			printk(KERN_ERR
				"xt_hashlimit: can't allocate dsthash_ent\n");
		return NULL;
	}
	memcpy(&ent->dst, dst, sizeof(ent->dst));

	hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
	ht->count++;
	return ent;
}

static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
	hlist_del(&ent->node);
	kmem_cache_free(hashlimit_cachep, ent);
	ht->count--;
}
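
/*
 * dsthash_find(), dsthash_alloc_init() and dsthash_free() all assume
 * the caller already holds ht->lock (hashlimit_match() and the garbage
 * collector take it with spin_lock_bh()), which is why plain list
 * operations and GFP_ATOMIC allocation are sufficient here.
 */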

static void htable_gc(unsigned long htlong);

static int htable_create(struct xt_hashlimit_info *minfo, int family)
{
	struct xt_hashlimit_htable *hinfo;
	unsigned int size;
	unsigned int i;

	if (minfo->cfg.size)
		size = minfo->cfg.size;
	else {
		size = ((num_physpages << PAGE_SHIFT) / 16384) /
			sizeof(struct list_head);
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			size = 8192;
		if (size < 16)
			size = 16;
	}
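
	/*
	 * Rough worked example of the default sizing above, assuming 4 KB
	 * pages and a 64-bit kernel (16-byte struct list_head): a 512 MB
	 * machine gets 512 MB / 16384 / 16 = 2048 buckets; anything with
	 * more than 1 GB of RAM is pinned to 8192 buckets, and very small
	 * machines get at least 16.
	 */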

	/* FIXME: don't use vmalloc() here or anywhere else -HW */
	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
			sizeof(struct list_head) * size);
	if (!hinfo) {
		printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
		return -1;
	}
	minfo->hinfo = hinfo;

	/* copy match config into hashtable config */
	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
	hinfo->cfg.size = size;
	if (!hinfo->cfg.max)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	atomic_set(&hinfo->use, 1);
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = 0;
	spin_lock_init(&hinfo->lock);
	hinfo->pde = create_proc_entry(minfo->name, 0,
				       family == AF_INET ? hashlimit_procdir4 :
							   hashlimit_procdir6);
	if (!hinfo->pde) {
		vfree(hinfo);
		return -1;
	}
	hinfo->pde->proc_fops = &dl_file_ops;
	hinfo->pde->data = hinfo;

	init_timer(&hinfo->timer);
	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
	hinfo->timer.data = (unsigned long)hinfo;
	hinfo->timer.function = htable_gc;
	add_timer(&hinfo->timer);

	spin_lock_bh(&hashlimit_lock);
	hlist_add_head(&hinfo->node, &hashlimit_htables);
	spin_unlock_bh(&hashlimit_lock);

	return 0;
}

static int select_all(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
{
	return 1;
}

static int select_gc(struct xt_hashlimit_htable *ht, struct dsthash_ent *he)
{
	return (jiffies >= he->expires);
}

static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
				     int (*select)(struct xt_hashlimit_htable *ht,
						   struct dsthash_ent *he))
{
	unsigned int i;

	/* lock hash table and iterate over it */
	spin_lock_bh(&ht->lock);
	for (i = 0; i < ht->cfg.size; i++) {
		struct dsthash_ent *dh;
		struct hlist_node *pos, *n;
		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
			if ((*select)(ht, dh))
				dsthash_free(ht, dh);
		}
	}
	spin_unlock_bh(&ht->lock);
}

/* hash table garbage collector, run by timer */
static void htable_gc(unsigned long htlong)
{
	struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;

	htable_selective_cleanup(ht, select_gc);

	/* re-add the timer accordingly */
	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
	add_timer(&ht->timer);
}

static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
	/* remove timer, if it is pending */
	if (timer_pending(&hinfo->timer))
		del_timer(&hinfo->timer);

	/* remove proc entry */
	remove_proc_entry(hinfo->pde->name,
			  hinfo->family == AF_INET ? hashlimit_procdir4 :
						     hashlimit_procdir6);
	htable_selective_cleanup(hinfo, select_all);
	vfree(hinfo);
}

static struct xt_hashlimit_htable *htable_find_get(char *name, int family)
{
	struct xt_hashlimit_htable *hinfo;
	struct hlist_node *pos;

	spin_lock_bh(&hashlimit_lock);
	hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
		if (!strcmp(name, hinfo->pde->name) &&
		    hinfo->family == family) {
			atomic_inc(&hinfo->use);
			spin_unlock_bh(&hashlimit_lock);
			return hinfo;
		}
	}
	spin_unlock_bh(&hashlimit_lock);
	return NULL;
}

static void htable_put(struct xt_hashlimit_htable *hinfo)
{
	if (atomic_dec_and_test(&hinfo->use)) {
		spin_lock_bh(&hashlimit_lock);
		hlist_del(&hinfo->node);
		spin_unlock_bh(&hashlimit_lock);
		htable_destroy(hinfo);
	}
}
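
/*
 * Tables are shared by name: checkentry() first tries htable_find_get(),
 * which bumps the use count, and only creates a fresh table when no
 * table of that name and family exists.  htable_put() tears the table
 * down once the last rule referencing it is removed.
 */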

/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If you get credit balance more than this, the extra credit is
   discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
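
/*
 * Worked example of the arithmetic above: with HZ=1000, MAX_CPJ is
 * 0xFFFFFFFF / 86,400,000 = 49 and POW2_BELOW32(49) = 32, so each
 * jiffy refills 32 credits (32,000 credits per second).  With HZ=100
 * the same calculation yields CREDITS_PER_JIFFY = 256.
 */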

/* Precision saver. */
static inline u_int32_t
user2credits(u_int32_t user)
{
	/* If multiplying would overflow... */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		/* Divide first. */
		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
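
/*
 * Example, assuming XT_HASHLIMIT_SCALE is 10000 and HZ=1000 (so
 * CREDITS_PER_JIFFY = 32): a user value of 10000 takes the non-overflow
 * branch and converts to 10000 * 1000 * 32 / 10000 = 32,000 credits.
 * Since the bucket also refills at 32,000 credits per second, a cost of
 * 32,000 corresponds to letting one packet per second pass.
 */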

static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
{
	dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
	if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
		dh->rateinfo.credit = dh->rateinfo.credit_cap;
	dh->rateinfo.prev = now;
}

static int
hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
		   const struct sk_buff *skb, unsigned int protoff)
{
	__be16 _ports[2], *ports;
	int nexthdr;

	memset(dst, 0, sizeof(*dst));

	switch (hinfo->family) {
	case AF_INET:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			dst->addr.ip.dst = skb->nh.iph->daddr;
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			dst->addr.ip.src = skb->nh.iph->saddr;

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = skb->nh.iph->protocol;
		break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	case AF_INET6:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			memcpy(&dst->addr.ip6.dst, &skb->nh.ipv6h->daddr,
			       sizeof(dst->addr.ip6.dst));
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			memcpy(&dst->addr.ip6.src, &skb->nh.ipv6h->saddr,
			       sizeof(dst->addr.ip6.src));

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ipv6_find_hdr(skb, &protoff, -1, NULL);
		if (nexthdr < 0)
			return -1;
		break;
#endif
	default:
		BUG();
		return 0;
	}

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		ports = skb_header_pointer(skb, protoff, sizeof(_ports),
					   &_ports);
		break;
	default:
		_ports[0] = _ports[1] = 0;
		ports = _ports;
		break;
	}
	if (!ports)
		return -1;
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
		dst->src_port = ports[0];
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
		dst->dst_port = ports[1];
	return 0;
}
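
/*
 * Per-packet hook: returns 1 ("match") while the bucket for this
 * packet's hash key still has at least `cost' credits, 0 once the key
 * is over its limit, and sets *hotdrop if the key cannot be extracted
 * or a new hash entry cannot be allocated.
 */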

static int
hashlimit_match(const struct sk_buff *skb,
		const struct net_device *in,
		const struct net_device *out,
		const struct xt_match *match,
		const void *matchinfo,
		int offset,
		unsigned int protoff,
		int *hotdrop)
{
	struct xt_hashlimit_info *r =
		((struct xt_hashlimit_info *)matchinfo)->u.master;
	struct xt_hashlimit_htable *hinfo = r->hinfo;
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;

	if (hashlimit_init_dst(hinfo, &dst, skb, protoff) < 0)
		goto hotdrop;

	spin_lock_bh(&hinfo->lock);
	dh = dsthash_find(hinfo, &dst);
	if (!dh) {
		dh = dsthash_alloc_init(hinfo, &dst);
		if (!dh) {
			spin_unlock_bh(&hinfo->lock);
			goto hotdrop;
		}

		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
		dh->rateinfo.prev = jiffies;
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
						   hinfo->cfg.burst);
		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
						       hinfo->cfg.burst);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now);
	}

	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
		/* We're underlimit. */
		dh->rateinfo.credit -= dh->rateinfo.cost;
		spin_unlock_bh(&hinfo->lock);
		return 1;
	}

	spin_unlock_bh(&hinfo->lock);

	/* default case: we're overlimit, thus don't match */
	return 0;

hotdrop:
	*hotdrop = 1;
	return 0;
}

static int
hashlimit_checkentry(const char *tablename,
		     const void *inf,
		     const struct xt_match *match,
		     void *matchinfo,
		     unsigned int hook_mask)
{
	struct xt_hashlimit_info *r = matchinfo;

	/* Check for overflow. */
	if (r->cfg.burst == 0 ||
	    user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
		printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
		       r->cfg.avg, r->cfg.burst);
		return 0;
	}
	if (r->cfg.mode == 0 ||
	    r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
			   XT_HASHLIMIT_HASH_DIP |
			   XT_HASHLIMIT_HASH_SIP |
			   XT_HASHLIMIT_HASH_SPT))
		return 0;
	if (!r->cfg.gc_interval)
		return 0;
	if (!r->cfg.expire)
		return 0;
	if (r->name[sizeof(r->name) - 1] != '\0')
		return 0;

	/* This is the best we've got: We cannot release and re-grab lock,
	 * since checkentry() is called before x_tables.c grabs xt_mutex.
	 * We also cannot grab the hashtable spinlock, since htable_create will
	 * call vmalloc, and that can sleep.  And we cannot just re-search
	 * the list of htable's in htable_create(), since then we would
	 * create duplicate proc files. -HW */
	mutex_lock(&hlimit_mutex);
	r->hinfo = htable_find_get(r->name, match->family);
	if (!r->hinfo && htable_create(r, match->family) != 0) {
		mutex_unlock(&hlimit_mutex);
		return 0;
	}
	mutex_unlock(&hlimit_mutex);

	/* Ugly hack: For SMP, we only want to use one set */
	r->u.master = r;
	return 1;
}

static void
hashlimit_destroy(const struct xt_match *match, void *matchinfo)
{
	struct xt_hashlimit_info *r = matchinfo;

	htable_put(r->hinfo);
}

#ifdef CONFIG_COMPAT
struct compat_xt_hashlimit_info {
	char name[IFNAMSIZ];
	struct hashlimit_cfg cfg;
	compat_uptr_t hinfo;
	compat_uptr_t master;
};
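
/*
 * Layout seen from 32-bit userspace on a 64-bit kernel: the trailing
 * hinfo/master members are 32-bit compat pointers.  The helpers below
 * copy only the name and cfg portion in either direction and zero what
 * follows on the way in; checkentry() then re-derives hinfo and master.
 */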

static void compat_from_user(void *dst, void *src)
{
	int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

	memcpy(dst, src, off);
	memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
}

static int compat_to_user(void __user *dst, void *src)
{
	int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

	return copy_to_user(dst, src, off) ? -EFAULT : 0;
}
#endif

static struct xt_match xt_hashlimit[] = {
	{
		.name		= "hashlimit",
		.family		= AF_INET,
		.match		= hashlimit_match,
		.matchsize	= sizeof(struct xt_hashlimit_info),
#ifdef CONFIG_COMPAT
		.compatsize	= sizeof(struct compat_xt_hashlimit_info),
		.compat_from_user = compat_from_user,
		.compat_to_user	= compat_to_user,
#endif
		.checkentry	= hashlimit_checkentry,
		.destroy	= hashlimit_destroy,
		.me		= THIS_MODULE
	},
	{
		.name		= "hashlimit",
		.family		= AF_INET6,
		.match		= hashlimit_match,
		.matchsize	= sizeof(struct xt_hashlimit_info),
#ifdef CONFIG_COMPAT
		.compatsize	= sizeof(struct compat_xt_hashlimit_info),
		.compat_from_user = compat_from_user,
		.compat_to_user	= compat_to_user,
#endif
		.checkentry	= hashlimit_checkentry,
		.destroy	= hashlimit_destroy,
		.me		= THIS_MODULE
	},
};

/* PROC stuff */
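/*
 * Each hash table shows up as /proc/net/ipt_hashlimit/<name> (or under
 * ip6t_hashlimit for IPv6).  The seq_file iterator walks one bucket per
 * step, taking the table lock in ->start and releasing it in ->stop;
 * each entry is printed as
 *   <secs until expiry> <src>:<sport>-><dst>:<dport> <credit> <credit_cap> <cost>
 */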

static void *dl_seq_start(struct seq_file *s, loff_t *pos)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket;

	spin_lock_bh(&htable->lock);
	if (*pos >= htable->cfg.size)
		return NULL;

	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);

	*bucket = *pos;
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket = (unsigned int *)v;

	*pos = ++(*bucket);
	if (*pos >= htable->cfg.size) {
		kfree(v);
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket = (unsigned int *)v;

	kfree(bucket);
	spin_unlock_bh(&htable->lock);
}

static int dl_seq_real_show(struct dsthash_ent *ent, int family,
			    struct seq_file *s)
{
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies);

	switch (family) {
	case AF_INET:
		return seq_printf(s, "%ld %u.%u.%u.%u:%u->"
				     "%u.%u.%u.%u:%u %u %u %u\n",
				  (long)(ent->expires - jiffies)/HZ,
				  NIPQUAD(ent->dst.addr.ip.src),
				  ntohs(ent->dst.src_port),
				  NIPQUAD(ent->dst.addr.ip.dst),
				  ntohs(ent->dst.dst_port),
				  ent->rateinfo.credit, ent->rateinfo.credit_cap,
				  ent->rateinfo.cost);
	case AF_INET6:
		return seq_printf(s, "%ld " NIP6_FMT ":%u->"
				     NIP6_FMT ":%u %u %u %u\n",
				  (long)(ent->expires - jiffies)/HZ,
				  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.src),
				  ntohs(ent->dst.src_port),
				  NIP6(*(struct in6_addr *)&ent->dst.addr.ip6.dst),
				  ntohs(ent->dst.dst_port),
				  ent->rateinfo.credit, ent->rateinfo.credit_cap,
				  ent->rateinfo.cost);
	default:
		BUG();
		return 0;
	}
}

static int dl_seq_show(struct seq_file *s, void *v)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket = (unsigned int *)v;
	struct dsthash_ent *ent;
	struct hlist_node *pos;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
			if (dl_seq_real_show(ent, htable->family, s))
				return 1;
	}
	return 0;
}

static struct seq_operations dl_seq_ops = {
	.start = dl_seq_start,
	.next  = dl_seq_next,
	.stop  = dl_seq_stop,
	.show  = dl_seq_show
};

static int dl_proc_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dl_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static const struct file_operations dl_file_ops = {
	.owner   = THIS_MODULE,
	.open    = dl_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static int __init xt_hashlimit_init(void)
{
	int err;

	err = xt_register_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
	if (err < 0)
		goto err1;

	err = -ENOMEM;
	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
					     sizeof(struct dsthash_ent), 0, 0,
					     NULL, NULL);
	if (!hashlimit_cachep) {
		printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
		goto err2;
	}
	hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", proc_net);
	if (!hashlimit_procdir4) {
		printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
				"entry\n");
		goto err3;
	}
	hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", proc_net);
	if (!hashlimit_procdir6) {
		printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
				"entry\n");
		goto err4;
	}
	return 0;
err4:
	remove_proc_entry("ipt_hashlimit", proc_net);
err3:
	kmem_cache_destroy(hashlimit_cachep);
err2:
	xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
err1:
	return err;
}

static void __exit xt_hashlimit_fini(void)
{
	remove_proc_entry("ipt_hashlimit", proc_net);
	remove_proc_entry("ip6t_hashlimit", proc_net);
	kmem_cache_destroy(hashlimit_cachep);
	xt_unregister_matches(xt_hashlimit, ARRAY_SIZE(xt_hashlimit));
}

module_init(xt_hashlimit_init);
module_exit(xt_hashlimit_fini);