ip_set_bitmap_gen.h

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */

#ifndef __IP_SET_BITMAP_IP_GEN_H
#define __IP_SET_BITMAP_IP_GEN_H

#define mtype_do_test		IPSET_TOKEN(MTYPE, _do_test)
#define mtype_gc_test		IPSET_TOKEN(MTYPE, _gc_test)
#define mtype_is_filled		IPSET_TOKEN(MTYPE, _is_filled)
#define mtype_do_add		IPSET_TOKEN(MTYPE, _do_add)
#define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
#define mtype_do_del		IPSET_TOKEN(MTYPE, _do_del)
#define mtype_do_list		IPSET_TOKEN(MTYPE, _do_list)
#define mtype_do_head		IPSET_TOKEN(MTYPE, _do_head)
#define mtype_adt_elem		IPSET_TOKEN(MTYPE, _adt_elem)
#define mtype_add_timeout	IPSET_TOKEN(MTYPE, _add_timeout)
#define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
#define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)
#define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
#define mtype_memsize		IPSET_TOKEN(MTYPE, _memsize)
#define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
#define mtype_head		IPSET_TOKEN(MTYPE, _head)
#define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
#define mtype_elem		IPSET_TOKEN(MTYPE, _elem)
#define mtype_test		IPSET_TOKEN(MTYPE, _test)
#define mtype_add		IPSET_TOKEN(MTYPE, _add)
#define mtype_del		IPSET_TOKEN(MTYPE, _del)
#define mtype_list		IPSET_TOKEN(MTYPE, _list)
#define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
#define mtype			MTYPE

#define get_ext(set, map, id)	((map)->extensions + ((set)->dsize * (id)))
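
/*
 * Added note: this header is a template. A concrete bitmap type defines
 * MTYPE before including it, and IPSET_TOKEN() pastes that prefix onto
 * every generic name, so with MTYPE defined as e.g. bitmap_ip the macro
 * mtype_add expands to bitmap_ip_add, mtype_destroy to bitmap_ip_destroy,
 * and so on.
 *
 * get_ext() returns the address of the extension block (timeout, counter,
 * comment, skbinfo) belonging to element 'id': the map keeps one
 * fixed-size block of set->dsize bytes per element in the 'extensions'
 * array, directly indexed by the element id.
 */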
static void
mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
{
	struct mtype *map = set->data;

	timer_setup(&map->gc, gc, 0);
	mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
}

static void
mtype_ext_cleanup(struct ip_set *set)
{
	struct mtype *map = set->data;
	u32 id;

	for (id = 0; id < map->elements; id++)
		if (test_bit(id, map->members))
			ip_set_ext_destroy(set, get_ext(set, map, id));
}
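
/*
 * Added note: tear down a set. Stop the garbage collector timer if
 * timeouts are in use, release per-element extension state, then free the
 * member bitmap and the map structure itself.
 */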
static void
mtype_destroy(struct ip_set *set)
{
	struct mtype *map = set->data;

	if (SET_WITH_TIMEOUT(set))
		del_timer_sync(&map->gc);

	if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
		mtype_ext_cleanup(set);
	ip_set_free(map->members);
	ip_set_free(map);

	set->data = NULL;
}
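
/*
 * Added note: empty the set in place. Per-element extensions are destroyed
 * where needed, the whole member bitmap is cleared and the element and
 * extension accounting reset, but the map stays allocated so the set can
 * be refilled.
 */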
static void
mtype_flush(struct ip_set *set)
{
	struct mtype *map = set->data;

	if (set->extensions & IPSET_EXT_DESTROY)
		mtype_ext_cleanup(set);
	bitmap_zero(map->members, map->elements);
	set->elements = 0;
	set->ext_size = 0;
}

/* Calculate the actual memory size of the set data */
static size_t
mtype_memsize(const struct mtype *map, size_t dsize)
{
	return sizeof(*map) + map->memsize +
	       map->elements * dsize;
}
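
/*
 * Added note: the reported footprint is the map header plus the member
 * bitmap (map->memsize) plus one dsize-byte extension block per element.
 *
 * mtype_head() fills the IPSET_ATTR_DATA attribute of a header reply:
 * the type-specific attributes via mtype_do_head() plus the common
 * reference count, memory size and element count.
 */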
static int
mtype_head(struct ip_set *set, struct sk_buff *skb)
{
	const struct mtype *map = set->data;
	struct nlattr *nested;
	size_t memsize = mtype_memsize(map, set->dsize) + set->ext_size;

	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
	if (!nested)
		goto nla_put_failure;
	if (mtype_do_head(skb, map) ||
	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
		goto nla_put_failure;
	if (unlikely(ip_set_put_flags(skb, set)))
		goto nla_put_failure;
	nla_nest_end(skb, nested);

	return 0;
nla_put_failure:
	return -EMSGSIZE;
}
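
/*
 * Added note: test an element. mtype_do_test() checks the bit (and any
 * type-specific part of the element); if it is present,
 * ip_set_match_extensions() additionally applies timeout expiry and any
 * counter/flag matching requested by the caller.
 */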
static int
mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	   struct ip_set_ext *mext, u32 flags)
{
	struct mtype *map = set->data;
	const struct mtype_adt_elem *e = value;
	void *x = get_ext(set, map, e->id);
	int ret = mtype_do_test(e, map, set->dsize);

	if (ret <= 0)
		return ret;
	return ip_set_match_extensions(set, ext, mext, flags, x);
}
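
/*
 * Added note: add an element. mtype_do_add() returns IPSET_ADD_FAILED when
 * the bit was already set: a timed-out entry is silently replaced,
 * otherwise the add fails with -IPSET_ERR_EXIST unless IPSET_FLAG_EXIST
 * was requested. In both re-add cases the old extensions are destroyed
 * before the new ones are initialized below.
 */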
static int
mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct mtype *map = set->data;
	const struct mtype_adt_elem *e = value;
	void *x = get_ext(set, map, e->id);
	int ret = mtype_do_add(e, map, flags, set->dsize);

	if (ret == IPSET_ADD_FAILED) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(x, set))) {
			set->elements--;
			ret = 0;
		} else if (!(flags & IPSET_FLAG_EXIST)) {
			set_bit(e->id, map->members);
			return -IPSET_ERR_EXIST;
		}
		/* Element is re-added, cleanup extensions */
		ip_set_ext_destroy(set, x);
	}
	if (ret > 0)
		set->elements--;

	if (SET_WITH_TIMEOUT(set))
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
		mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
#else
		ip_set_timeout_set(ext_timeout(x, set), ext->timeout);
#endif

	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(x, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(x, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(x, set), ext);

	/* Activate element */
	set_bit(e->id, map->members);
	set->elements++;

	return 0;
}
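
/*
 * Added note: delete an element. A clear bit, or a set bit whose timeout
 * has already expired, both report -IPSET_ERR_EXIST; extension state is
 * destroyed and the element count adjusted only when the bit was actually
 * set.
 */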
static int
mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	  struct ip_set_ext *mext, u32 flags)
{
	struct mtype *map = set->data;
	const struct mtype_adt_elem *e = value;
	void *x = get_ext(set, map, e->id);

	if (mtype_do_del(e, map))
		return -IPSET_ERR_EXIST;

	ip_set_ext_destroy(set, x);
	set->elements--;
	if (SET_WITH_TIMEOUT(set) &&
	    ip_set_timeout_expired(ext_timeout(x, set)))
		return -IPSET_ERR_EXIST;

	return 0;
}
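
/*
 * Added note: types that do not store the timeout inside the element
 * itself treat every set bit as a complete element, so the generic code
 * can use this trivial mtype_is_filled(); types that define
 * IP_SET_BITMAP_STORED_TIMEOUT supply their own version.
 */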
#ifndef IP_SET_BITMAP_STORED_TIMEOUT
static bool
mtype_is_filled(const struct mtype_elem *x)
{
	return true;
}
#endif
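
/*
 * Added note: dump the set content. The netlink dump is resumable:
 * cb->args[IPSET_CB_ARG0] holds the next element id to emit, so a dump
 * that overflows one skb continues where it left off on the next callback;
 * if not even the first element of a round fits, -EMSGSIZE is returned and
 * the cursor is reset.
 */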
static int
mtype_list(const struct ip_set *set,
	   struct sk_buff *skb, struct netlink_callback *cb)
{
	struct mtype *map = set->data;
	struct nlattr *adt, *nested;
	void *x;
	u32 id, first = cb->args[IPSET_CB_ARG0];
	int ret = 0;

	adt = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!adt)
		return -EMSGSIZE;
	/* Extensions may be replaced */
	rcu_read_lock();
	for (; cb->args[IPSET_CB_ARG0] < map->elements;
	     cb->args[IPSET_CB_ARG0]++) {
		cond_resched_rcu();
		id = cb->args[IPSET_CB_ARG0];
		x = get_ext(set, map, id);
		if (!test_bit(id, map->members) ||
		    (SET_WITH_TIMEOUT(set) &&
#ifdef IP_SET_BITMAP_STORED_TIMEOUT
		     mtype_is_filled(x) &&
#endif
		     ip_set_timeout_expired(ext_timeout(x, set))))
			continue;
		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested) {
			if (id == first) {
				nla_nest_cancel(skb, adt);
				ret = -EMSGSIZE;
				goto out;
			}

			goto nla_put_failure;
		}
		if (mtype_do_list(skb, map, id, set->dsize))
			goto nla_put_failure;
		if (ip_set_put_extensions(skb, set, x, mtype_is_filled(x)))
			goto nla_put_failure;
		nla_nest_end(skb, nested);
	}
	nla_nest_end(skb, adt);

	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;

	goto out;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	if (unlikely(id == first)) {
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	}
	nla_nest_end(skb, adt);
out:
	rcu_read_unlock();
	return ret;
}
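
/*
 * Added note: periodic garbage collector. It walks the bitmap under the
 * set spinlock, clears entries whose timeout has expired and frees their
 * extensions, then re-arms itself for the next IPSET_GC_PERIOD interval.
 * Lookups may run concurrently; add/del are excluded by the same lock.
 */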
static void
mtype_gc(struct timer_list *t)
{
	struct mtype *map = from_timer(map, t, gc);
	struct ip_set *set = map->set;
	void *x;
	u32 id;

	/* We run parallel with other readers (test element)
	 * but adding/deleting new entries is locked out
	 */
	spin_lock_bh(&set->lock);
	for (id = 0; id < map->elements; id++)
		if (mtype_gc_test(id, map, set->dsize)) {
			x = get_ext(set, map, id);
			if (ip_set_timeout_expired(ext_timeout(x, set))) {
				clear_bit(id, map->members);
				ip_set_ext_destroy(set, x);
				set->elements--;
			}
		}
	spin_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
	add_timer(&map->gc);
}
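
/*
 * Added note: the ops table exported to the ip_set core. Thanks to the
 * token pasting above it materializes under the concrete type's name
 * (e.g. bitmap_ip for the bitmap:ip type) and is what the type's create
 * routine installs as set->variant.
 */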
static const struct ip_set_type_variant mtype = {
	.kadt	= mtype_kadt,
	.uadt	= mtype_uadt,
	.adt	= {
		[IPSET_ADD] = mtype_add,
		[IPSET_DEL] = mtype_del,
		[IPSET_TEST] = mtype_test,
	},
	.destroy = mtype_destroy,
	.flush	= mtype_flush,
	.head	= mtype_head,
	.list	= mtype_list,
	.same_set = mtype_same_set,
};
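
/*
 * Added note: rough sketch of how a concrete type consumes this template
 * (modelled on ip_set_bitmap_ip.c; illustrative, not a verbatim excerpt):
 * the .c file defines MTYPE (e.g. bitmap_ip), its map structure and the
 * bitmap_ip_do_*() / _kadt / _uadt / _same_set helpers referenced above,
 * and then does
 *
 *	#include "ip_set_bitmap_gen.h"
 *
 * so that the functions generated here come out as bitmap_ip_add(),
 * bitmap_ip_list(), ... and the 'mtype' variant becomes the bitmap_ip ops
 * table installed as set->variant at create time.
 */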

#endif /* __IP_SET_BITMAP_IP_GEN_H */