// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

struct tcindex_data;

struct tcindex_filter_result {
	struct tcf_exts		exts;
	struct tcf_result	res;
	struct tcindex_data	*p;
	struct rcu_work		rwork;
};

struct tcindex_filter {
	u16 key;
	struct tcindex_filter_result result;
	struct tcindex_filter __rcu *next;
	struct rcu_work rwork;
};

struct tcindex_data {
	struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
	struct tcindex_filter __rcu **h; /* imperfect hash; */
	struct tcf_proto *tp;
	u16 mask;		/* AND key with mask */
	u32 shift;		/* shift ANDed key to the right */
	u32 hash;		/* hash table size; 0 if undefined */
	u32 alloc_hash;		/* allocated size */
	u32 fall_through;	/* 0: only classify if explicit match */
	refcount_t refcnt;	/* a temporary refcnt for perfect hash */
	struct rcu_work rwork;
};

static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
	return tcf_exts_has_actions(&r->exts) || r->res.classid;
}

static void tcindex_data_get(struct tcindex_data *p)
{
	refcount_inc(&p->refcnt);
}

static void tcindex_data_put(struct tcindex_data *p)
{
	if (refcount_dec_and_test(&p->refcnt)) {
		kfree(p->perfect);
		kfree(p->h);
		kfree(p);
	}
}

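/* Resolve a key to its filter result: index straight into the perfect-hash
 * array when one exists, otherwise search the bucket chain at key % p->hash
 * in the imperfect hash table. Returns NULL when no matching result with
 * actions or a classid is installed.
 */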
static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
						    u16 key)
{
	if (p->perfect) {
		struct tcindex_filter_result *f = p->perfect + key;

		return tcindex_filter_is_set(f) ? f : NULL;
	} else if (p->h) {
		struct tcindex_filter __rcu **fp;
		struct tcindex_filter *f;

		fp = &p->h[key % p->hash];
		for (f = rcu_dereference_bh_rtnl(*fp);
		     f;
		     fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
			if (f->key == key)
				return &f->result;
	}

	return NULL;
}

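/* Fast path: the lookup key is skb->tc_index masked and shifted per the
 * instance configuration. On a lookup miss with fall_through enabled, the
 * key itself is used as the minor part of the returned class ID.
 */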
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct tcindex_data *p = rcu_dereference_bh(tp->root);
	struct tcindex_filter_result *f;
	int key = (skb->tc_index & p->mask) >> p->shift;

	pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
		 skb, tp, res, p);

	f = tcindex_lookup(p, key);
	if (!f) {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		if (!p->fall_through)
			return -1;
		res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
		res->class = 0;
		pr_debug("alg 0x%x\n", res->classid);
		return 0;
	}
	*res = f->res;
	pr_debug("map 0x%x\n", res->classid);

	return tcf_exts_exec(skb, &f->exts, res);
}

static void *tcindex_get(struct tcf_proto *tp, u32 handle)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r;

	pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
	if (p->perfect && handle >= p->alloc_hash)
		return NULL;
	r = tcindex_lookup(p, handle);
	return r && tcindex_filter_is_set(r) ? r : NULL;
}

static int tcindex_init(struct tcf_proto *tp)
{
	struct tcindex_data *p;

	pr_debug("tcindex_init(tp %p)\n", tp);
	p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->mask = 0xffff;
	p->hash = DEFAULT_HASH_SIZE;
	p->fall_through = 1;
	refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */

	rcu_assign_pointer(tp->root, p);
	return 0;
}

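/* Teardown helpers. Filter results and imperfect-hash entries are normally
 * released through tcf_queue_work() so that tcf_exts_destroy() only runs
 * after an RCU grace period (see the comment in tcindex_delete()); the work
 * handlers retake the RTNL lock before destroying the extensions.
 */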
static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
{
	tcf_exts_destroy(&r->exts);
	tcf_exts_put_net(&r->exts);
	tcindex_data_put(r->p);
}

static void tcindex_destroy_rexts_work(struct work_struct *work)
{
	struct tcindex_filter_result *r;

	r = container_of(to_rcu_work(work),
			 struct tcindex_filter_result,
			 rwork);
	rtnl_lock();
	__tcindex_destroy_rexts(r);
	rtnl_unlock();
}

static void __tcindex_destroy_fexts(struct tcindex_filter *f)
{
	tcf_exts_destroy(&f->result.exts);
	tcf_exts_put_net(&f->result.exts);
	kfree(f);
}

static void tcindex_destroy_fexts_work(struct work_struct *work)
{
	struct tcindex_filter *f = container_of(to_rcu_work(work),
						struct tcindex_filter,
						rwork);

	rtnl_lock();
	__tcindex_destroy_fexts(f);
	rtnl_unlock();
}

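/* Delete a single filter. In perfect-hash mode the slot is only unbound and
 * its extensions torn down; in imperfect-hash mode the matching entry is also
 * unlinked from its bucket chain before being freed.
 */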
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = arg;
	struct tcindex_filter __rcu **walk;
	struct tcindex_filter *f = NULL;

	pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p);
	if (p->perfect) {
		if (!r->res.class)
			return -ENOENT;
	} else {
		int i;

		for (i = 0; i < p->hash; i++) {
			walk = p->h + i;
			for (f = rtnl_dereference(*walk); f;
			     walk = &f->next, f = rtnl_dereference(*walk)) {
				if (&f->result == r)
					goto found;
			}
		}
		return -ENOENT;

found:
		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
	}
	tcf_unbind_filter(tp, &r->res);
	/* all classifiers are required to call tcf_exts_destroy() after rcu
	 * grace period, since converted-to-rcu actions are relying on that
	 * in cleanup() callback
	 */
	if (f) {
		if (tcf_exts_get_net(&f->result.exts))
			tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work);
		else
			__tcindex_destroy_fexts(f);
	} else {
		tcindex_data_get(p);
		if (tcf_exts_get_net(&r->exts))
			tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
		else
			__tcindex_destroy_rexts(r);
	}

	*last = false;
	return 0;
}

static void tcindex_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	tcindex_data_put(p);
}

static inline int
valid_perfect_hash(struct tcindex_data *p)
{
	return p->hash > (p->mask >> p->shift);
}

static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
};

static int tcindex_filter_result_init(struct tcindex_filter_result *r,
				      struct tcindex_data *p,
				      struct net *net)
{
	memset(r, 0, sizeof(*r));
	r->p = p;
	return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
			     TCA_TCINDEX_POLICE);
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp);

static void tcindex_partial_destroy_work(struct work_struct *work)
{
	struct tcindex_data *p = container_of(to_rcu_work(work),
					      struct tcindex_data,
					      rwork);

	rtnl_lock();
	if (p->perfect)
		tcindex_free_perfect_hash(p);
	kfree(p);
	rtnl_unlock();
}

static void tcindex_free_perfect_hash(struct tcindex_data *cp)
{
	int i;

	for (i = 0; i < cp->hash; i++)
		tcf_exts_destroy(&cp->perfect[i].exts);
	kfree(cp->perfect);
}

static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
	int i, err = 0;

	cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
			      GFP_KERNEL | __GFP_NOWARN);
	if (!cp->perfect)
		return -ENOMEM;

	for (i = 0; i < cp->hash; i++) {
		err = tcf_exts_init(&cp->perfect[i].exts, net,
				    TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
		if (err < 0)
			goto errout;
		cp->perfect[i].p = cp;
	}

	return 0;

errout:
	tcindex_free_perfect_hash(cp);
	return err;
}

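/* Apply a configuration change. A new tcindex_data is allocated and filled
 * from the old one plus the netlink attributes, then RCU-assigned onto
 * tp->root; the old instance is released via tcindex_partial_destroy_work()
 * after the grace period implied by tcf_queue_work().
 */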
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
		  u32 handle, struct tcindex_data *p,
		  struct tcindex_filter_result *r, struct nlattr **tb,
		  struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
	struct tcindex_filter_result new_filter_result, *old_r = r;
	struct tcindex_data *cp = NULL, *oldp;
	struct tcindex_filter *f = NULL; /* make gcc behave */
	struct tcf_result cr = {};
	int err, balloc = 0;
	struct tcf_exts e;

	err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
	if (err < 0)
		return err;
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
	if (err < 0)
		goto errout;

	err = -ENOMEM;
	/* tcindex_data attributes must look atomic to classifier/lookup so
	 * allocate new tcindex data and RCU assign it onto root. Keeping
	 * perfect hash and hash pointers from old data.
	 */
	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		goto errout;

	cp->mask = p->mask;
	cp->shift = p->shift;
	cp->hash = p->hash;
	cp->alloc_hash = p->alloc_hash;
	cp->fall_through = p->fall_through;
	cp->tp = tp;
	refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */

	if (tb[TCA_TCINDEX_HASH])
		cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);

	if (tb[TCA_TCINDEX_MASK])
		cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);

	if (tb[TCA_TCINDEX_SHIFT]) {
		cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
		if (cp->shift > 16) {
			err = -EINVAL;
			goto errout;
		}
	}
	if (!cp->hash) {
		/* Hash not specified, use perfect hash if the upper limit
		 * of the hashing index is below the threshold.
		 */
		if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
			cp->hash = (cp->mask >> cp->shift) + 1;
		else
			cp->hash = DEFAULT_HASH_SIZE;
	}

	if (p->perfect) {
		int i;

		if (tcindex_alloc_perfect_hash(net, cp) < 0)
			goto errout;
		cp->alloc_hash = cp->hash;
		for (i = 0; i < min(cp->hash, p->hash); i++)
			cp->perfect[i].res = p->perfect[i].res;
		balloc = 1;
	}
	cp->h = p->h;

	err = tcindex_filter_result_init(&new_filter_result, cp, net);
	if (err < 0)
		goto errout_alloc;
	if (old_r)
		cr = r->res;

	err = -EBUSY;

	/* Hash already allocated, make sure that we still meet the
	 * requirements for the allocated hash.
	 */
	if (cp->perfect) {
		if (!valid_perfect_hash(cp) ||
		    cp->hash > cp->alloc_hash)
			goto errout_alloc;
	} else if (cp->h && cp->hash != cp->alloc_hash) {
		goto errout_alloc;
	}

	err = -EINVAL;
	if (tb[TCA_TCINDEX_FALL_THROUGH])
		cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);

	if (!cp->perfect && !cp->h)
		cp->alloc_hash = cp->hash;

	/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
	 * but then, we'd fail handles that may become valid after some future
	 * mask change. While this is extremely unlikely to ever matter,
	 * the check below is safer (and also more backwards-compatible).
	 */
	if (cp->perfect || valid_perfect_hash(cp))
		if (handle >= cp->alloc_hash)
			goto errout_alloc;


	err = -ENOMEM;
	if (!cp->perfect && !cp->h) {
		if (valid_perfect_hash(cp)) {
			if (tcindex_alloc_perfect_hash(net, cp) < 0)
				goto errout_alloc;
			balloc = 1;
		} else {
			struct tcindex_filter __rcu **hash;

			hash = kcalloc(cp->hash,
				       sizeof(struct tcindex_filter *),
				       GFP_KERNEL);

			if (!hash)
				goto errout_alloc;

			cp->h = hash;
			balloc = 2;
		}
	}

	if (cp->perfect)
		r = cp->perfect + handle;
	else
		r = tcindex_lookup(cp, handle) ? : &new_filter_result;

	if (r == &new_filter_result) {
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (!f)
			goto errout_alloc;
		f->key = handle;
		f->next = NULL;

		err = tcindex_filter_result_init(&f->result, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	if (tb[TCA_TCINDEX_CLASSID]) {
		cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
		tcf_bind_filter(tp, &cr, base);
	}

	if (old_r && old_r != r) {
		err = tcindex_filter_result_init(old_r, cp, net);
		if (err < 0) {
			kfree(f);
			goto errout_alloc;
		}
	}

	oldp = p;
	r->res = cr;
	tcf_exts_change(&r->exts, &e);

	rcu_assign_pointer(tp->root, cp);

	if (r == &new_filter_result) {
		struct tcindex_filter *nfp;
		struct tcindex_filter __rcu **fp;

		f->result.res = r->res;
		tcf_exts_change(&f->result.exts, &r->exts);

		fp = cp->h + (handle % cp->hash);
		for (nfp = rtnl_dereference(*fp);
		     nfp;
		     fp = &nfp->next, nfp = rtnl_dereference(*fp))
			; /* nothing */

		rcu_assign_pointer(*fp, f);
	} else {
		tcf_exts_destroy(&new_filter_result.exts);
	}

	if (oldp)
		tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
	return 0;

errout_alloc:
	if (balloc == 1)
		tcindex_free_perfect_hash(cp);
	else if (balloc == 2)
		kfree(cp->h);
	tcf_exts_destroy(&new_filter_result.exts);
errout:
	kfree(cp);
	tcf_exts_destroy(&e);
	return err;
}

static int
tcindex_change(struct net *net, struct sk_buff *in_skb,
	       struct tcf_proto *tp, unsigned long base, u32 handle,
	       struct nlattr **tca, void **arg, bool ovr,
	       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = *arg;
	int err;

	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
		 "p %p,r %p,*arg %p\n",
		 tp, handle, tca, arg, opt, p, r, *arg);

	if (!opt)
		return 0;

	err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt,
					  tcindex_policy, NULL);
	if (err < 0)
		return err;

	return tcindex_set_parms(net, tp, base, handle, p, r, tb,
				 tca[TCA_RATE], ovr, extack);
}

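/* Iterate over all installed results for dumping: first the populated slots
 * of the perfect-hash array, then every chain of the imperfect hash table,
 * honouring the walker's skip/count bookkeeping.
 */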
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker,
			 bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter *f, *next;
	int i;

	pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			if (!p->perfect[i].res.class)
				continue;
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, p->perfect + i, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
	if (!p->h)
		return;
	for (i = 0; i < p->hash; i++) {
		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			if (walker->count >= walker->skip) {
				if (walker->fn(tp, &f->result, walker) < 0) {
					walker->stop = 1;
					return;
				}
			}
			walker->count++;
		}
	}
}

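/* Tear down the whole instance: unbind and queue destruction of every
 * perfect-hash result (taking a temporary reference on 'p' per entry, see
 * the comment below), delete all imperfect-hash entries via tcindex_delete(),
 * and finally queue the tcindex_data itself for release.
 */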
static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	int i;

	pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);

	if (p->perfect) {
		for (i = 0; i < p->hash; i++) {
			struct tcindex_filter_result *r = p->perfect + i;

			/* tcf_queue_work() does not guarantee the ordering we
			 * want, so we have to take this refcnt temporarily to
			 * ensure 'p' is freed after all tcindex_filter_result
			 * here. Imperfect hash does not need this, because it
			 * uses linked lists rather than an array.
			 */
			tcindex_data_get(p);

			tcf_unbind_filter(tp, &r->res);
			if (tcf_exts_get_net(&r->exts))
				tcf_queue_work(&r->rwork,
					       tcindex_destroy_rexts_work);
			else
				__tcindex_destroy_rexts(r);
		}
	}

	for (i = 0; p->h && i < p->hash; i++) {
		struct tcindex_filter *f, *next;
		bool last;

		for (f = rtnl_dereference(p->h[i]); f; f = next) {
			next = rtnl_dereference(f->next);
			tcindex_delete(tp, &f->result, &last, rtnl_held, NULL);
		}
	}

	tcf_queue_work(&p->rwork, tcindex_destroy_work);
}

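/* Dump either the per-instance parameters (when fh is NULL) or a single
 * filter result, in which case the handle is recovered from its position in
 * the perfect-hash array or from the matching imperfect-hash entry's key.
 */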
static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tcindex_data *p = rtnl_dereference(tp->root);
	struct tcindex_filter_result *r = fh;
	struct nlattr *nest;

	pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n",
		 tp, fh, skb, t, p, r);
	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!fh) {
		t->tcm_handle = ~0; /* whatever ... */
		if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
		    nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
		    nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
		    nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	} else {
		if (p->perfect) {
			t->tcm_handle = r - p->perfect;
		} else {
			struct tcindex_filter *f;
			struct tcindex_filter __rcu **fp;
			int i;

			t->tcm_handle = 0;
			for (i = 0; !t->tcm_handle && i < p->hash; i++) {
				fp = &p->h[i];
				for (f = rtnl_dereference(*fp);
				     !t->tcm_handle && f;
				     fp = &f->next, f = rtnl_dereference(*fp)) {
					if (&f->result == r)
						t->tcm_handle = f->key;
				}
			}
		}
		pr_debug("handle = %d\n", t->tcm_handle);
		if (r->res.class &&
		    nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
			goto nla_put_failure;

		if (tcf_exts_dump(skb, &r->exts) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);

		if (tcf_exts_dump_stats(skb, &r->exts) < 0)
			goto nla_put_failure;
	}

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct tcindex_filter_result *r = fh;

	if (r && r->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &r->res, base);
		else
			__tcf_unbind_filter(q, &r->res);
	}
}

static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
	.kind		=	"tcindex",
	.classify	=	tcindex_classify,
	.init		=	tcindex_init,
	.destroy	=	tcindex_destroy,
	.get		=	tcindex_get,
	.change		=	tcindex_change,
	.delete		=	tcindex_delete,
	.walk		=	tcindex_walk,
	.dump		=	tcindex_dump,
	.bind_class	=	tcindex_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_tcindex(void)
{
	return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
	unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");