/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
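/*
 * For example, a filter such as
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 10 \
 *		u32 match ip src 10.0.0.0/8 flowid 1:1
 *
 * becomes a single tc_u_knode in the root tc_u_hnode, carrying one
 * 32bit key/mask pair (the IPv4 source address word at offset 12,
 * masked to /8) and a terminal result pointing at class 1:1.
 */
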
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

struct tc_u_knode
{
        struct tc_u_knode       *next;
        u32                     handle;
        struct tc_u_hnode       *ht_up;
        struct tcf_exts         exts;
#ifdef CONFIG_NET_CLS_IND
        char                    indev[IFNAMSIZ];
#endif
        u8                      fshift;
        struct tcf_result       res;
        struct tc_u_hnode       *ht_down;
#ifdef CONFIG_CLS_U32_PERF
        struct tc_u32_pcnt      *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
        struct tc_u32_mark      mark;
#endif
        struct tc_u32_sel       sel;
};

struct tc_u_hnode
{
        struct tc_u_hnode       *next;
        u32                     handle;
        u32                     prio;
        struct tc_u_common      *tp_c;
        int                     refcnt;
        unsigned                divisor;
        struct tc_u_knode       *ht[1];
};

struct tc_u_common
{
        struct tc_u_common      *next;
        struct tc_u_hnode       *hlist;
        struct Qdisc            *q;
        int                     refcnt;
        u32                     hgenerator;
};

static struct tcf_ext_map u32_ext_map = {
        .action = TCA_U32_ACT,
        .police = TCA_U32_POLICE
};

static struct tc_u_common *u32_list;

static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
        unsigned h = (key & sel->hmask)>>fshift;

        return h;
}

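/*
 * Core matching loop.  Starting at tp->root, hash into a bucket and
 * compare every key/mask pair of each knode against the packet.  A
 * terminal match returns its result; a knode with ht_down links to a
 * deeper hash table, so the current node is pushed on a small stack
 * and the walk backtracks through it when a subtree fails to match.
 */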
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
        struct {
                struct tc_u_knode *knode;
                u8                *ptr;
        } stack[TC_U32_MAXDEPTH];

        struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
        u8 *ptr = skb->nh.raw;
        struct tc_u_knode *n;
        int sdepth = 0;
        int off2 = 0;
        int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
        int j;
#endif
        int i, r;

next_ht:
        n = ht->ht[sel];

next_knode:
        if (n) {
                struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
                n->pf->rcnt +=1;
                j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
                if ((skb->mark & n->mark.mask) != n->mark.val) {
                        n = n->next;
                        goto next_knode;
                } else {
                        n->mark.success++;
                }
#endif

                for (i = n->sel.nkeys; i>0; i--, key++) {

                        if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
                                n = n->next;
                                goto next_knode;
                        }
#ifdef CONFIG_CLS_U32_PERF
                        n->pf->kcnts[j] +=1;
                        j++;
#endif
                }
                if (n->ht_down == NULL) {
check_terminal:
                        if (n->sel.flags&TC_U32_TERMINAL) {

                                *res = n->res;
#ifdef CONFIG_NET_CLS_IND
                                if (!tcf_match_indev(skb, n->indev)) {
                                        n = n->next;
                                        goto next_knode;
                                }
#endif
#ifdef CONFIG_CLS_U32_PERF
                                n->pf->rhit +=1;
#endif
                                r = tcf_exts_exec(skb, &n->exts, res);
                                if (r < 0) {
                                        n = n->next;
                                        goto next_knode;
                                }

                                return r;
                        }
                        n = n->next;
                        goto next_knode;
                }

                /* PUSH */
                if (sdepth >= TC_U32_MAXDEPTH)
                        goto deadloop;
                stack[sdepth].knode = n;
                stack[sdepth].ptr = ptr;
                sdepth++;

                ht = n->ht_down;
                sel = 0;
                if (ht->divisor)
                        sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel, n->fshift);

                if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
                        goto next_ht;

                if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
                        off2 = n->sel.off + 3;
                        if (n->sel.flags&TC_U32_VAROFFSET)
                                off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
                        off2 &= ~3;
                }
                if (n->sel.flags&TC_U32_EAT) {
                        ptr += off2;
                        off2 = 0;
                }

                if (ptr < skb->tail)
                        goto next_ht;
        }

        /* POP */
        if (sdepth--) {
                n = stack[sdepth].knode;
                ht = n->ht_up;
                ptr = stack[sdepth].ptr;
                goto check_terminal;
        }
        return -1;

deadloop:
        if (net_ratelimit())
                printk("cls_u32: dead loop\n");
        return -1;
}

static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
        struct tc_u_hnode *ht;

        for (ht = tp_c->hlist; ht; ht = ht->next)
                if (ht->handle == handle)
                        break;

        return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
        unsigned sel;
        struct tc_u_knode *n = NULL;

        sel = TC_U32_HASH(handle);
        if (sel > ht->divisor)
                goto out;

        for (n = ht->ht[sel]; n; n = n->next)
                if (n->handle == handle)
                        break;
out:
        return n;
}

static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
        struct tc_u_hnode *ht;
        struct tc_u_common *tp_c = tp->data;

        if (TC_U32_HTID(handle) == TC_U32_ROOT)
                ht = tp->root;
        else
                ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

        if (!ht)
                return 0;

        if (TC_U32_KEY(handle) == 0)
                return (unsigned long)ht;

        return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

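/*
 * Handles pack the hash table id into the top 12 bits, the bucket into
 * the next 8 and the node id into the low 12 (the TC_U32_HTID,
 * TC_U32_HASH and TC_U32_NODE macros).  gen_new_htid picks an unused
 * table id; the 0x800 bit marks it as kernel-generated.
 */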
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
        int i = 0x800;

        do {
                if (++tp_c->hgenerator == 0x7FF)
                        tp_c->hgenerator = 1;
        } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

        return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

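/*
 * All u32 classifiers attached to the same qdisc share one tc_u_common,
 * so hash tables created at one priority can be linked to by filters at
 * another.  u32_init finds or creates that shared state and allocates
 * the per-instance root hash table.
 */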
static int u32_init(struct tcf_proto *tp)
{
        struct tc_u_hnode *root_ht;
        struct tc_u_common *tp_c;

        for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
                if (tp_c->q == tp->q)
                        break;

        root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
        if (root_ht == NULL)
                return -ENOBUFS;

        root_ht->divisor = 0;
        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
        root_ht->prio = tp->prio;

        if (tp_c == NULL) {
                tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
                if (tp_c == NULL) {
                        kfree(root_ht);
                        return -ENOBUFS;
                }
                tp_c->q = tp->q;
                tp_c->next = u32_list;
                u32_list = tp_c;
        }

        tp_c->refcnt++;
        root_ht->next = tp_c->hlist;
        tp_c->hlist = root_ht;
        root_ht->tp_c = tp_c;

        tp->root = root_ht;
        tp->data = tp_c;
        return 0;
}

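/*
 * Teardown path: u32_destroy_key frees a single knode (unbinding its
 * class and dropping the reference on any linked table), u32_delete_key
 * also unchains it from its bucket under the tree lock, and the hnode
 * helpers below empty and free whole hash tables.
 */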
static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
        tcf_unbind_filter(tp, &n->res);
        tcf_exts_destroy(tp, &n->exts);
        if (n->ht_down)
                n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
        kfree(n->pf);
#endif
        kfree(n);
        return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
        struct tc_u_knode **kp;
        struct tc_u_hnode *ht = key->ht_up;

        if (ht) {
                for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
                        if (*kp == key) {
                                tcf_tree_lock(tp);
                                *kp = key->next;
                                tcf_tree_unlock(tp);

                                u32_destroy_key(tp, key);
                                return 0;
                        }
                }
        }
        BUG_TRAP(0);
        return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
        struct tc_u_knode *n;
        unsigned h;

        for (h=0; h<=ht->divisor; h++) {
                while ((n = ht->ht[h]) != NULL) {
                        ht->ht[h] = n->next;

                        u32_destroy_key(tp, n);
                }
        }
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode **hn;

        BUG_TRAP(!ht->refcnt);

        u32_clear_hnode(tp, ht);

        for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
                if (*hn == ht) {
                        *hn = ht->next;
                        kfree(ht);
                        return 0;
                }
        }

        BUG_TRAP(0);
        return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

        BUG_TRAP(root_ht != NULL);

        if (root_ht && --root_ht->refcnt == 0)
                u32_destroy_hnode(tp, root_ht);

        if (--tp_c->refcnt == 0) {
                struct tc_u_hnode *ht;
                struct tc_u_common **tp_cp;

                for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
                        if (*tp_cp == tp_c) {
                                *tp_cp = tp_c->next;
                                break;
                        }
                }

                for (ht=tp_c->hlist; ht; ht = ht->next)
                        u32_clear_hnode(tp, ht);

                while ((ht = tp_c->hlist) != NULL) {
                        tp_c->hlist = ht->next;

                        BUG_TRAP(ht->refcnt == 0);

                        kfree(ht);
                }

                kfree(tp_c);
        }

        tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

        if (ht == NULL)
                return 0;

        if (TC_U32_KEY(ht->handle))
                return u32_delete_key(tp, (struct tc_u_knode*)ht);

        if (tp->root == ht)
                return -EINVAL;

        if (--ht->refcnt == 0)
                u32_destroy_hnode(tp, ht);

        return 0;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
        struct tc_u_knode *n;
        unsigned i = 0x7FF;

        for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
                if (i < TC_U32_NODE(n->handle))
                        i = TC_U32_NODE(n->handle);
        i++;

        return handle|(i>0xFFF ? 0xFFF : i);
}

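/*
 * Apply the attributes shared by create and change requests: a link to
 * another hash table (TCA_U32_LINK), the target class id, the input
 * device match and the extended actions.  The actions are validated up
 * front so a parse failure never disturbs the installed action list.
 */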
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
                         struct tc_u_hnode *ht,
                         struct tc_u_knode *n, struct rtattr **tb,
                         struct rtattr *est)
{
        int err;
        struct tcf_exts e;

        err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
        if (err < 0)
                return err;

        err = -EINVAL;
        if (tb[TCA_U32_LINK-1]) {
                u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
                struct tc_u_hnode *ht_down = NULL;

                if (TC_U32_KEY(handle))
                        goto errout;

                if (handle) {
                        ht_down = u32_lookup_ht(ht->tp_c, handle);

                        if (ht_down == NULL)
                                goto errout;
                        ht_down->refcnt++;
                }

                tcf_tree_lock(tp);
                ht_down = xchg(&n->ht_down, ht_down);
                tcf_tree_unlock(tp);

                if (ht_down)
                        ht_down->refcnt--;
        }
        if (tb[TCA_U32_CLASSID-1]) {
                n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
                tcf_bind_filter(tp, &n->res, base);
        }

#ifdef CONFIG_NET_CLS_IND
        if (tb[TCA_U32_INDEV-1]) {
                int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
                if (err < 0)
                        goto errout;
        }
#endif
        tcf_exts_change(tp, &n->exts, &e);

        return 0;
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}

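/*
 * Three cases arrive here from the netlink layer: changing an existing
 * knode (*arg is set), creating a new hash table (TCA_U32_DIVISOR is
 * present), or creating a new knode and inserting it, sorted by node
 * id, into the chosen bucket.
 */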
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
                      struct rtattr **tca,
                      unsigned long *arg)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        struct tc_u32_sel *s;
        struct rtattr *opt = tca[TCA_OPTIONS-1];
        struct rtattr *tb[TCA_U32_MAX];
        u32 htid;
        int err;

        if (opt == NULL)
                return handle ? -EINVAL : 0;

        if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
                return -EINVAL;

        if ((n = (struct tc_u_knode*)*arg) != NULL) {
                if (TC_U32_KEY(n->handle) == 0)
                        return -EINVAL;

                return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
        }

        if (tb[TCA_U32_DIVISOR-1]) {
                unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

                if (--divisor > 0x100)
                        return -EINVAL;
                if (TC_U32_KEY(handle))
                        return -EINVAL;
                if (handle == 0) {
                        handle = gen_new_htid(tp->data);
                        if (handle == 0)
                                return -ENOMEM;
                }
                ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
                if (ht == NULL)
                        return -ENOBUFS;
                ht->tp_c = tp_c;
                ht->refcnt = 0;
                ht->divisor = divisor;
                ht->handle = handle;
                ht->prio = tp->prio;
                ht->next = tp_c->hlist;
                tp_c->hlist = ht;
                *arg = (unsigned long)ht;
                return 0;
        }

        if (tb[TCA_U32_HASH-1]) {
                htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
                if (TC_U32_HTID(htid) == TC_U32_ROOT) {
                        ht = tp->root;
                        htid = ht->handle;
                } else {
                        ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
                        if (ht == NULL)
                                return -EINVAL;
                }
        } else {
                ht = tp->root;
                htid = ht->handle;
        }

        if (ht->divisor < TC_U32_HASH(htid))
                return -EINVAL;

        if (handle) {
                if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
                        return -EINVAL;
                handle = htid | TC_U32_NODE(handle);
        } else
                handle = gen_new_kid(ht, htid);

        if (tb[TCA_U32_SEL-1] == 0 ||
            RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
                return -EINVAL;

        s = RTA_DATA(tb[TCA_U32_SEL-1]);

        n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
        if (n == NULL)
                return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
        n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
        if (n->pf == NULL) {
                kfree(n);
                return -ENOBUFS;
        }
#endif

        memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
        n->ht_up = ht;
        n->handle = handle;

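        /*
         * fshift is the bit position of the lowest set bit in hmask;
         * u32_hash_fold() shifts the masked key down by it before the
         * divisor is applied.
         */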
        {
                u8 i = 0;
                u32 mask = s->hmask;
                if (mask) {
                        while (!(mask & 1)) {
                                i++;
                                mask>>=1;
                        }
                }
                n->fshift = i;
        }

#ifdef CONFIG_CLS_U32_MARK
        if (tb[TCA_U32_MARK-1]) {
                struct tc_u32_mark *mark;

                if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
#ifdef CONFIG_CLS_U32_PERF
                        kfree(n->pf);
#endif
                        kfree(n);
                        return -EINVAL;
                }
                mark = RTA_DATA(tb[TCA_U32_MARK-1]);
                memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
                n->mark.success = 0;
        }
#endif

        err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
        if (err == 0) {
                struct tc_u_knode **ins;
                for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
                        if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
                                break;

                n->next = *ins;
                wmb();
                *ins = n;

                *arg = (unsigned long)n;
                return 0;
        }
#ifdef CONFIG_CLS_U32_PERF
        kfree(n->pf);
#endif
        kfree(n);
        return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct tc_u_common *tp_c = tp->data;
        struct tc_u_hnode *ht;
        struct tc_u_knode *n;
        unsigned h;

        if (arg->stop)
                return;

        for (ht = tp_c->hlist; ht; ht = ht->next) {
                if (ht->prio != tp->prio)
                        continue;
                if (arg->count >= arg->skip) {
                        if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
                                arg->stop = 1;
                                return;
                        }
                }
                arg->count++;
                for (h = 0; h <= ht->divisor; h++) {
                        for (n = ht->ht[h]; n; n = n->next) {
                                if (arg->count < arg->skip) {
                                        arg->count++;
                                        continue;
                                }
                                if (arg->fn(tp, (unsigned long)n, arg) < 0) {
                                        arg->stop = 1;
                                        return;
                                }
                                arg->count++;
                        }
                }
        }
}

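/*
 * Serialize one element back to user space: a hash table node dumps
 * only its divisor, while a key node dumps its selector, classid,
 * table link, mark, actions and, when enabled, the hit counters.
 */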
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
                    struct sk_buff *skb, struct tcmsg *t)
{
        struct tc_u_knode *n = (struct tc_u_knode*)fh;
        unsigned char *b = skb->tail;
        struct rtattr *rta;

        if (n == NULL)
                return skb->len;

        t->tcm_handle = n->handle;

        rta = (struct rtattr*)b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

        if (TC_U32_KEY(n->handle) == 0) {
                struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
                u32 divisor = ht->divisor+1;
                RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
        } else {
                RTA_PUT(skb, TCA_U32_SEL,
                        sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
                        &n->sel);
                if (n->ht_up) {
                        u32 htid = n->handle & 0xFFFFF000;
                        RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
                }
                if (n->res.classid)
                        RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
                if (n->ht_down)
                        RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
                if (n->mark.val || n->mark.mask)
                        RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

                if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
                        goto rtattr_failure;

#ifdef CONFIG_NET_CLS_IND
                if (strlen(n->indev))
                        RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
                RTA_PUT(skb, TCA_U32_PCNT,
                        sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
                        n->pf);
#endif
        }

        rta->rta_len = skb->tail - b;
        if (TC_U32_KEY(n->handle))
                if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
                        goto rtattr_failure;
        return skb->len;

rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static struct tcf_proto_ops cls_u32_ops = {
        .next           =       NULL,
        .kind           =       "u32",
        .classify       =       u32_classify,
        .init           =       u32_init,
        .destroy        =       u32_destroy,
        .get            =       u32_get,
        .put            =       u32_put,
        .change         =       u32_change,
        .delete         =       u32_delete,
        .walk           =       u32_walk,
        .dump           =       u32_dump,
        .owner          =       THIS_MODULE,
};

static int __init init_u32(void)
{
        printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
        printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
        printk("    OLD policer on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
        printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
        printk("    Actions configured\n");
#endif
        return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
        unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");