nf_sockopt.c

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/mutex.h>
#include <net/sock.h>

#include "nf_internals.h"

/* Sockopts are only registered and called from user context, so net
   locking would be overkill.  Also, [gs]etsockopt calls may sleep. */
static DEFINE_MUTEX(nf_sockopt_mutex);
static LIST_HEAD(nf_sockopts);

/* Do two half-open [min, max) ranges overlap?  (optmax is exclusive.) */
static inline int overlap(int min1, int max1, int min2, int max2)
{
	return max1 > min2 && min1 < max2;
}

/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
	struct list_head *i;
	int ret = 0;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		/* The cast is valid only because 'list' is the first
		   member of struct nf_sockopt_ops. */
		struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;

		if (ops->pf == reg->pf
		    && (overlap(ops->set_optmin, ops->set_optmax,
				reg->set_optmin, reg->set_optmax)
			|| overlap(ops->get_optmin, ops->get_optmax,
				   reg->get_optmin, reg->get_optmax))) {
			NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n",
				ops->set_optmin, ops->set_optmax,
				ops->get_optmin, ops->get_optmax,
				reg->set_optmin, reg->set_optmax,
				reg->get_optmin, reg->get_optmax);
			ret = -EBUSY;
			goto out;
		}
	}

	list_add(&reg->list, &nf_sockopts);
out:
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}
EXPORT_SYMBOL(nf_register_sockopt);
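
/*
 * Example (illustrative, not part of this file): an owner registers a
 * half-open [optmin, optmax) range roughly as sketched below.  All of
 * the EXAMPLE_* constants and do_example_* handlers are hypothetical;
 * ip_tables registers its IPT_BASE_CTL range in much the same way.
 *
 *	static struct nf_sockopt_ops example_sockopts = {
 *		.pf		= PF_INET,
 *		.set_optmin	= EXAMPLE_BASE_CTL,
 *		.set_optmax	= EXAMPLE_SET_MAX + 1,
 *		.set		= do_example_set_ctl,
 *		.get_optmin	= EXAMPLE_BASE_CTL,
 *		.get_optmax	= EXAMPLE_GET_MAX + 1,
 *		.get		= do_example_get_ctl,
 *	};
 *
 *	ret = nf_register_sockopt(&example_sockopts);	(module init)
 *	nf_unregister_sockopt(&example_sockopts);	(module exit)
 */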
void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
{
	/* No point being interruptible: we're probably in cleanup_module() */
restart:
	mutex_lock(&nf_sockopt_mutex);
	if (reg->use != 0) {
		/* Still in use: sleep until the nf_sockopt() caller drops
		   reg->use and wakes us, then re-check under the mutex. */
		/* FIXME: Stuart Young's name appears gratuitously. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		reg->cleanup_task = current;
		mutex_unlock(&nf_sockopt_mutex);
		schedule();
		goto restart;
	}
	list_del(&reg->list);
	mutex_unlock(&nf_sockopt_mutex);
}
EXPORT_SYMBOL(nf_unregister_sockopt);
/* Call get/setsockopt().  Bump ops->use before dropping the mutex so
   the owner cannot be unregistered while its handler (which may sleep)
   is running. */
static int nf_sockopt(struct sock *sk, int pf, int val,
		      char __user *opt, int *len, int get)
{
	struct list_head *i;
	struct nf_sockopt_ops *ops;
	int ret;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == pf) {
			if (get) {
				if (val >= ops->get_optmin
				    && val < ops->get_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					ret = ops->get(sk, val, opt, len);
					goto out;
				}
			} else {
				if (val >= ops->set_optmin
				    && val < ops->set_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					ret = ops->set(sk, val, opt, *len);
					goto out;
				}
			}
		}
	}
	mutex_unlock(&nf_sockopt_mutex);
	return -ENOPROTOOPT;

out:
	mutex_lock(&nf_sockopt_mutex);
	ops->use--;
	/* Wake anyone sleeping in nf_unregister_sockopt(). */
	if (ops->cleanup_task)
		wake_up_process(ops->cleanup_task);
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}
int nf_setsockopt(struct sock *sk, int pf, int val, char __user *opt,
		  int len)
{
	return nf_sockopt(sk, pf, val, opt, &len, 0);
}
EXPORT_SYMBOL(nf_setsockopt);

int nf_getsockopt(struct sock *sk, int pf, int val, char __user *opt, int *len)
{
	return nf_sockopt(sk, pf, val, opt, len, 1);
}
EXPORT_SYMBOL(nf_getsockopt);
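
/*
 * Example (illustrative, not part of this file): the per-family
 * [gs]etsockopt() paths hand option numbers they do not recognise to
 * netfilter.  The IPv4 path in net/ipv4/ip_sockglue.c does roughly:
 *
 *	err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
 *
 * nf_sockopt() then dispatches to whichever registered nf_sockopt_ops
 * claims a [set_optmin, set_optmax) range containing optname, and
 * returns -ENOPROTOOPT if none does.
 */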
#ifdef CONFIG_COMPAT
/* Same dispatch as nf_sockopt(), but prefer the owner's compat_* hook
   (for 32-bit callers on a 64-bit kernel), falling back to the native
   handler when no compat hook is provided. */
static int compat_nf_sockopt(struct sock *sk, int pf, int val,
			     char __user *opt, int *len, int get)
{
	struct list_head *i;
	struct nf_sockopt_ops *ops;
	int ret;

	if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
		return -EINTR;

	list_for_each(i, &nf_sockopts) {
		ops = (struct nf_sockopt_ops *)i;
		if (ops->pf == pf) {
			if (get) {
				if (val >= ops->get_optmin
				    && val < ops->get_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					if (ops->compat_get)
						ret = ops->compat_get(sk,
							val, opt, len);
					else
						ret = ops->get(sk,
							val, opt, len);
					goto out;
				}
			} else {
				if (val >= ops->set_optmin
				    && val < ops->set_optmax) {
					ops->use++;
					mutex_unlock(&nf_sockopt_mutex);
					if (ops->compat_set)
						ret = ops->compat_set(sk,
							val, opt, *len);
					else
						ret = ops->set(sk,
							val, opt, *len);
					goto out;
				}
			}
		}
	}
	mutex_unlock(&nf_sockopt_mutex);
	return -ENOPROTOOPT;

out:
	mutex_lock(&nf_sockopt_mutex);
	ops->use--;
	/* Wake anyone sleeping in nf_unregister_sockopt(). */
	if (ops->cleanup_task)
		wake_up_process(ops->cleanup_task);
	mutex_unlock(&nf_sockopt_mutex);
	return ret;
}

int compat_nf_setsockopt(struct sock *sk, int pf,
			 int val, char __user *opt, int len)
{
	return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
}
EXPORT_SYMBOL(compat_nf_setsockopt);

int compat_nf_getsockopt(struct sock *sk, int pf,
			 int val, char __user *opt, int *len)
{
	return compat_nf_sockopt(sk, pf, val, opt, len, 1);
}
EXPORT_SYMBOL(compat_nf_getsockopt);
#endif
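
/*
 * Example (illustrative, not part of this file): extending the sketch
 * after nf_register_sockopt() above, an owner that supports 32-bit
 * callers on a 64-bit kernel also fills in the compat_* hooks; the
 * compat_do_example_* names are hypothetical.  If they are left NULL,
 * compat_nf_sockopt() simply falls back to the native .set/.get.
 *
 *	static struct nf_sockopt_ops example_sockopts = {
 *		.pf		= PF_INET,
 *		.set_optmin	= EXAMPLE_BASE_CTL,
 *		.set_optmax	= EXAMPLE_SET_MAX + 1,
 *		.set		= do_example_set_ctl,
 *		.compat_set	= compat_do_example_set_ctl,
 *		.get_optmin	= EXAMPLE_BASE_CTL,
 *		.get_optmax	= EXAMPLE_GET_MAX + 1,
 *		.get		= do_example_get_ctl,
 *		.compat_get	= compat_do_example_get_ctl,
 *	};
 */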