nf_queue.c

/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
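
/*
 * Illustrative sketch (not part of this file): how a queueing backend
 * such as nfnetlink_queue might wire itself up. The names my_qh,
 * my_outfn and my_hook_drop are hypothetical; ->outfn() enqueues one
 * nf_queue_entry, ->nf_hook_drop() flushes everything queued for a
 * netns.
 *
 *	static const struct nf_queue_handler my_qh = {
 *		.outfn		= my_outfn,
 *		.nf_hook_drop	= my_hook_drop,
 *	};
 *
 *	nf_register_queue_handler(net, &my_qh);
 *	...
 *	nf_unregister_queue_handler(net);
 */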

static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
        sock_gen_put(sk);
#else
        sock_put(sk);
#endif
}

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                nf_queue_sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->physin)
                dev_put(entry->physin);
        if (entry->physout)
                dev_put(entry->physout);
#endif
}

void nf_queue_entry_free(struct nf_queue_entry *entry)
{
        nf_queue_entry_release_refs(entry);
        kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);

static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        const struct sk_buff *skb = entry->skb;
        struct nf_bridge_info *nf_bridge;

        nf_bridge = nf_bridge_info_get(skb);
        if (nf_bridge) {
                entry->physin = nf_bridge_get_physindev(skb);
                entry->physout = nf_bridge_get_physoutdev(skb);
        } else {
                entry->physin = NULL;
                entry->physout = NULL;
        }
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
                return false;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->physin)
                dev_hold(entry->physin);
        if (entry->physout)
                dev_hold(entry->physout);
#endif
        return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
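
/*
 * Reference lifecycle, sketched from a hypothetical backend's ->outfn():
 * __nf_queue() has already taken the device/socket references above, so
 * the backend only decides who drops them.
 *
 *	static int my_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
 *	{
 *		if (!deliver_to_userspace(entry, queuenum))
 *			return -ENOBUFS;
 *		return 0;
 *	}
 *
 * deliver_to_userspace() is hypothetical. On a negative return,
 * __nf_queue() calls nf_queue_entry_free(), which drops the references
 * taken by nf_queue_entry_get_refs(); on success the backend owns the
 * entry until nf_reinject() (which frees it) or a queue flush (where
 * the backend must free it itself).
 */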

void nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                qh->nf_hook_drop(net);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
                            struct nf_queue_entry *entry)
{
        struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct iphdr *iph = ip_hdr(skb);

                rt_info->tos = iph->tos;
                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
                             struct nf_queue_entry *entry)
{
        struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

        if (entry->state.hook == NF_INET_LOCAL_OUT) {
                const struct ipv6hdr *iph = ipv6_hdr(skb);

                rt_info->daddr = iph->daddr;
                rt_info->saddr = iph->saddr;
                rt_info->mark = skb->mark;
        }
}
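
/*
 * The keys saved above are consumed by nf_reroute() on the reinject
 * path. Roughly (a sketch of the IPv4 half; see net/ipv4/netfilter.c
 * for the real thing), the packet is re-routed only if userspace
 * changed one of the saved fields while the packet was queued:
 *
 *	const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (iph->tos != rt_info->tos || skb->mark != rt_info->mark ||
 *	    iph->daddr != rt_info->daddr || iph->saddr != rt_info->saddr)
 *		return ip_route_me_harder(entry->state.net, entry->state.sk,
 *					  skb, RTN_UNSPEC);
 */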

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      unsigned int index, unsigned int queuenum)
{
        struct nf_queue_entry *entry = NULL;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;
        unsigned int route_key_size;
        int status;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh)
                return -ESRCH;

        switch (state->pf) {
        case AF_INET:
                route_key_size = sizeof(struct ip_rt_info);
                break;
        case AF_INET6:
                route_key_size = sizeof(struct ip6_rt_info);
                break;
        default:
                route_key_size = 0;
                break;
        }

        if (skb_sk_is_prefetched(skb)) {
                struct sock *sk = skb->sk;

                if (!sk_is_refcounted(sk)) {
                        if (!refcount_inc_not_zero(&sk->sk_refcnt))
                                return -ENOTCONN;

                        /* drop refcount on skb_orphan */
                        skb->destructor = sock_edemux;
                }
        }

        entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;

        if (skb_dst(skb) && !skb_dst_force(skb)) {
                kfree(entry);
                return -ENETDOWN;
        }

        *entry = (struct nf_queue_entry) {
                .skb = skb,
                .state = *state,
                .hook_index = index,
                .size = sizeof(*entry) + route_key_size,
        };

        __nf_queue_entry_init_physdevs(entry);

        if (!nf_queue_entry_get_refs(entry)) {
                kfree(entry);
                return -ENOTCONN;
        }

        switch (entry->state.pf) {
        case AF_INET:
                nf_ip_saveroute(skb, entry);
                break;
        case AF_INET6:
                nf_ip6_saveroute(skb, entry);
                break;
        }

        status = qh->outfn(entry, queuenum);
        if (status < 0) {
                nf_queue_entry_free(entry);
                return status;
        }

        return 0;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             unsigned int index, unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
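
/*
 * How a verdict encodes its target queue (sketch): the queue number is
 * stored above the verdict bits, which is why __nf_queue() receives
 * 'verdict >> NF_VERDICT_QBITS' as queuenum. A hook that wants queue 7
 * with graceful fallback when no handler is listening would return:
 *
 *	NF_QUEUE | NF_VERDICT_FLAG_QUEUE_BYPASS | (7 << NF_VERDICT_QBITS)
 *
 * Without the bypass flag, -ESRCH from __nf_queue() drops the packet.
 */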

static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        *index = i;
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
        switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        case NFPROTO_BRIDGE:
                return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
        case NFPROTO_IPV4:
                return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
        case NFPROTO_IPV6:
                return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
        default:
                WARN_ON_ONCE(1);
                return NULL;
        }

        return NULL;
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = nf_hook_entries_head(net, pf, entry->state.hook);

        i = entry->hook_index;
        if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                nf_queue_entry_free(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                if (nf_reroute(skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);
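
/*
 * Reinjection, sketched from the backend side: when userspace issues a
 * verdict for a queued packet, the backend dequeues the matching entry
 * and hands it back here under the RCU read lock, per the comment on
 * nf_reinject() above. find_dequeue_entry() is hypothetical; in
 * nfnetlink_queue this happens in the verdict netlink handler.
 *
 *	entry = find_dequeue_entry(queue, packet_id);
 *	if (entry) {
 *		rcu_read_lock();
 *		nf_reinject(entry, verdict);
 *		rcu_read_unlock();
 *	}
 */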