/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * 2000-03-27: Simplified code (thanks to Andi Kleen for clues).
 * 2000-05-20: Fixed notifier problems (following Miguel Freitas' report).
 * 2000-06-19: Fixed so nfmark is copied to metadata (reported by Sebastian
 *             Zander).
 * 2000-08-01: Added Nick Williams' MAC support.
 * 2002-06-25: Code cleanup.
 * 2005-01-10: Added /proc counter for dropped packets; fixed so
 *             packets aren't delivered to user space if they're going
 *             to be dropped.
 * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
 *
 */
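
/*
 * Userspace talks to this module over a NETLINK_FIREWALL socket
 * (typically via libipq): it selects a copy mode with an IPQM_MODE
 * message and answers each queued packet with an IPQM_VERDICT
 * message.  See ipq_receive_peer() below for the dispatch.
 */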

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/sock.h>
#include <net/route.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

struct ipq_queue_entry {
        struct list_head list;
        struct nf_info *info;
        struct sk_buff *skb;
};

typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);

static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped;
static unsigned int queue_user_dropped;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);
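
/*
 * Hand a queued packet back to netfilter with the given verdict and
 * free the bookkeeping entry.
 */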
static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
{
        /* The TCP input path (and probably other bits) assumes it is
         * called from softirq context, not from a syscall the way
         * ipq_issue_verdict is.  The TCP input path can deadlock with
         * locks taken from timer softirqs, e.g.  We therefore emulate
         * softirq context here with local_bh_disable(). */
        local_bh_disable();
        nf_reinject(entry->skb, entry->info, verdict);
        local_bh_enable();

        kfree(entry);
}

static inline void
__ipq_enqueue_entry(struct ipq_queue_entry *entry)
{
        list_add(&entry->list, &queue_list);
        queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct ipq_queue_entry *
__ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)
{
        struct list_head *p;

        list_for_each_prev(p, &queue_list) {
                struct ipq_queue_entry *entry =
                        list_entry(p, struct ipq_queue_entry, list);

                if (!cmpfn || cmpfn(entry, data))
                        return entry;
        }
        return NULL;
}

static inline void
__ipq_dequeue_entry(struct ipq_queue_entry *entry)
{
        list_del(&entry->list);
        queue_total--;
}

static inline struct ipq_queue_entry *
__ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
        struct ipq_queue_entry *entry;

        entry = __ipq_find_entry(cmpfn, data);
        if (entry == NULL)
                return NULL;

        __ipq_dequeue_entry(entry);
        return entry;
}
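
/*
 * Drain the whole queue, issuing the given verdict for every entry.
 * Caller must hold queue_lock for writing.
 */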
static inline void
__ipq_flush(int verdict)
{
        struct ipq_queue_entry *entry;

        while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
                ipq_issue_verdict(entry, verdict);
}
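
/*
 * Switch the copy mode: IPQ_COPY_NONE and IPQ_COPY_META queue metadata
 * only, while IPQ_COPY_PACKET also copies up to 'range' bytes of
 * payload (clamped to 0xFFFF).  Caller must hold queue_lock for
 * writing.
 */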
static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case IPQ_COPY_NONE:
        case IPQ_COPY_META:
                copy_mode = mode;
                copy_range = 0;
                break;

        case IPQ_COPY_PACKET:
                copy_mode = mode;
                copy_range = range;
                if (copy_range > 0xFFFF)
                        copy_range = 0xFFFF;
                break;

        default:
                status = -EINVAL;
        }
        return status;
}
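
/*
 * Forget the userspace peer and drop everything still queued; used
 * when the peer closes its netlink socket.
 */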
static inline void
__ipq_reset(void)
{
        peer_pid = 0;
        net_disable_timestamp();
        __ipq_set_mode(IPQ_COPY_NONE, 0);
        __ipq_flush(NF_DROP);
}

static struct ipq_queue_entry *
ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
        struct ipq_queue_entry *entry;

        write_lock_bh(&queue_lock);
        entry = __ipq_find_dequeue_entry(cmpfn, data);
        write_unlock_bh(&queue_lock);
        return entry;
}

static void
ipq_flush(int verdict)
{
        write_lock_bh(&queue_lock);
        __ipq_flush(verdict);
        write_unlock_bh(&queue_lock);
}
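
/*
 * Build an IPQM_PACKET netlink message for a queued packet, copying
 * metadata and (in IPQ_COPY_PACKET mode) up to copy_range bytes of
 * payload.  Returns NULL and sets *errp on failure.
 */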
static struct sk_buff *
ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
{
        unsigned char *old_tail;
        size_t size = 0;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct ipq_packet_msg *pmsg;
        struct nlmsghdr *nlh;

        read_lock_bh(&queue_lock);

        switch (copy_mode) {
        case IPQ_COPY_META:
        case IPQ_COPY_NONE:
                size = NLMSG_SPACE(sizeof(*pmsg));
                data_len = 0;
                break;

        case IPQ_COPY_PACKET:
                if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
                     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entry->skb))) {
                        read_unlock_bh(&queue_lock);
                        return NULL;
                }
                if (copy_range == 0 || copy_range > entry->skb->len)
                        data_len = entry->skb->len;
                else
                        data_len = copy_range;

                size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
                break;

        default:
                *errp = -EINVAL;
                read_unlock_bh(&queue_lock);
                return NULL;
        }

        read_unlock_bh(&queue_lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
        pmsg = NLMSG_DATA(nlh);
        memset(pmsg, 0, sizeof(*pmsg));

        pmsg->packet_id = (unsigned long)entry;
        pmsg->data_len = data_len;
        pmsg->timestamp_sec = entry->skb->tstamp.off_sec;
        pmsg->timestamp_usec = entry->skb->tstamp.off_usec;
        pmsg->mark = entry->skb->mark;
        pmsg->hook = entry->info->hook;
        pmsg->hw_protocol = entry->skb->protocol;

        if (entry->info->indev)
                strcpy(pmsg->indev_name, entry->info->indev->name);
        else
                pmsg->indev_name[0] = '\0';

        if (entry->info->outdev)
                strcpy(pmsg->outdev_name, entry->info->outdev->name);
        else
                pmsg->outdev_name[0] = '\0';

        if (entry->info->indev && entry->skb->dev) {
                pmsg->hw_type = entry->skb->dev->type;
                if (entry->skb->dev->hard_header_parse)
                        pmsg->hw_addrlen =
                                entry->skb->dev->hard_header_parse(entry->skb,
                                                                   pmsg->hw_addr);
        }

        if (data_len)
                if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
                        BUG();

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
        if (skb)
                kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip_queue: error creating packet message\n");
        return NULL;
}
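
/*
 * Netfilter queue handler callback: build a netlink message for the
 * packet, unicast it to the userspace peer, and keep the entry queued
 * until a verdict arrives.
 */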
static int
ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
                   unsigned int queuenum, void *data)
{
        int status = -EINVAL;
        struct sk_buff *nskb;
        struct ipq_queue_entry *entry;

        if (copy_mode == IPQ_COPY_NONE)
                return -EAGAIN;

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                printk(KERN_ERR "ip_queue: OOM in ipq_enqueue_packet()\n");
                return -ENOMEM;
        }

        entry->info = info;
        entry->skb = skb;

        nskb = ipq_build_packet_message(entry, &status);
        if (nskb == NULL)
                goto err_out_free;

        write_lock_bh(&queue_lock);

        if (!peer_pid)
                goto err_out_free_nskb;

        if (queue_total >= queue_maxlen) {
                queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "ip_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n", queue_total,
                               queue_dropped);
                goto err_out_free_nskb;
        }

        /* netlink_unicast will either free the nskb or attach it to a socket */
        status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue_user_dropped++;
                goto err_out_unlock;
        }

        __ipq_enqueue_entry(entry);

        write_unlock_bh(&queue_lock);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        write_unlock_bh(&queue_lock);

err_out_free:
        kfree(entry);
        return status;
}
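
/*
 * Replace a queued packet's payload with the (possibly resized) packet
 * supplied by userspace in the verdict message, shrinking or growing
 * the skb as needed.
 */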
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
{
        int diff;
        struct iphdr *user_iph = (struct iphdr *)v->payload;

        if (v->data_len < sizeof(*user_iph))
                return 0;

        diff = v->data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, v->data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (v->data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        struct sk_buff *newskb;

                        newskb = skb_copy_expand(e->skb,
                                                 skb_headroom(e->skb),
                                                 diff,
                                                 GFP_ATOMIC);
                        if (newskb == NULL) {
                                printk(KERN_WARNING "ip_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        if (e->skb->sk)
                                skb_set_owner_w(newskb, e->skb->sk);
                        kfree_skb(e->skb);
                        e->skb = newskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(&e->skb, v->data_len))
                return -ENOMEM;
        memcpy(e->skb->data, v->payload, v->data_len);
        e->skb->ip_summed = CHECKSUM_NONE;

        return 0;
}

static inline int
id_cmp(struct ipq_queue_entry *e, unsigned long id)
{
        return (id == (unsigned long)e);
}
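
/*
 * Apply a verdict from userspace.  The packet id in the message is the
 * kernel address of the queue entry (see id_cmp() above); a failed
 * mangle downgrades the verdict to NF_DROP.
 */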
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
        struct ipq_queue_entry *entry;

        if (vmsg->value > NF_MAX_VERDICT)
                return -EINVAL;

        entry = ipq_find_dequeue_entry(id_cmp, vmsg->id);
        if (entry == NULL)
                return -ENOENT;
        else {
                int verdict = vmsg->value;

                if (vmsg->data_len && vmsg->data_len == len)
                        if (ipq_mangle_ipv4(vmsg, entry) < 0)
                                verdict = NF_DROP;

                ipq_issue_verdict(entry, verdict);
                return 0;
        }
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status;

        write_lock_bh(&queue_lock);
        status = __ipq_set_mode(mode, range);
        write_unlock_bh(&queue_lock);
        return status;
}

static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
                 unsigned char type, unsigned int len)
{
        int status = 0;

        if (len < sizeof(*pmsg))
                return -EINVAL;

        switch (type) {
        case IPQM_MODE:
                status = ipq_set_mode(pmsg->msg.mode.value,
                                      pmsg->msg.mode.range);
                break;

        case IPQM_VERDICT:
                if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
                        status = -EINVAL;
                else
                        status = ipq_set_verdict(&pmsg->msg.verdict,
                                                 len - sizeof(*pmsg));
                break;
        default:
                status = -EINVAL;
        }
        return status;
}
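
/*
 * Match a queued entry against an interface index, checking the input
 * and output devices and, with bridge netfilter, the physical bridge
 * ports as well.
 */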
static int
dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
{
        if (entry->info->indev)
                if (entry->info->indev->ifindex == ifindex)
                        return 1;
        if (entry->info->outdev)
                if (entry->info->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

static void
ipq_dev_drop(int ifindex)
{
        struct ipq_queue_entry *entry;

        while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
                ipq_issue_verdict(entry, NF_DROP);
}
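
/*
 * RCV_SKB_FAIL acks the offending message with an error and bails out.
 * ipq_rcv_skb() below validates one netlink message from userspace and
 * dispatches it to ipq_receive_peer(); the first well-formed sender
 * becomes the peer until its socket is released.
 */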
#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static inline void
ipq_rcv_skb(struct sk_buff *skb)
{
        int status, type, pid, flags, nlmsglen, skblen;
        struct nlmsghdr *nlh;

        skblen = skb->len;
        if (skblen < sizeof(*nlh))
                return;

        nlh = (struct nlmsghdr *)skb->data;
        nlmsglen = nlh->nlmsg_len;
        if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
                return;

        pid = nlh->nlmsg_pid;
        flags = nlh->nlmsg_flags;

        if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
                RCV_SKB_FAIL(-EINVAL);

        if (flags & MSG_TRUNC)
                RCV_SKB_FAIL(-ECOMM);

        type = nlh->nlmsg_type;
        if (type < NLMSG_NOOP || type >= IPQM_MAX)
                RCV_SKB_FAIL(-EINVAL);

        if (type <= IPQM_BASE)
                return;

        if (security_netlink_recv(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);

        write_lock_bh(&queue_lock);

        if (peer_pid) {
                if (peer_pid != pid) {
                        write_unlock_bh(&queue_lock);
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
                net_enable_timestamp();
                peer_pid = pid;
        }

        write_unlock_bh(&queue_lock);

        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
        if (status < 0)
                RCV_SKB_FAIL(status);

        if (flags & NLM_F_ACK)
                netlink_ack(skb, nlh, 0);
        return;
}
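
/*
 * Netlink data-ready callback: drain the socket's receive queue,
 * handling one message per skb.
 */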
static void
ipq_rcv_sk(struct sock *sk, int len)
{
        struct sk_buff *skb;
        unsigned int qlen;

        mutex_lock(&ipqnl_mutex);

        for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
                skb = skb_dequeue(&sk->sk_receive_queue);
                ipq_rcv_skb(skb);
                kfree_skb(skb);
        }

        mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
                  unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                ipq_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
        .notifier_call  = ipq_rcv_dev_event,
};

static int
ipq_rcv_nl_event(struct notifier_block *this,
                 unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_FIREWALL && n->pid) {
                write_lock_bh(&queue_lock);
                if (n->pid == peer_pid)
                        __ipq_reset();
                write_unlock_bh(&queue_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
        .notifier_call  = ipq_rcv_nl_event,
};

static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
        {
                .ctl_name       = NET_IPQ_QMAX,
                .procname       = NET_IPQ_QMAX_NAME,
                .data           = &queue_maxlen,
                .maxlen         = sizeof(queue_maxlen),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        { .ctl_name = 0 }
};

static ctl_table ipq_dir_table[] = {
        {
                .ctl_name       = NET_IPV4,
                .procname       = "ipv4",
                .mode           = 0555,
                .child          = ipq_table
        },
        { .ctl_name = 0 }
};

static ctl_table ipq_root_table[] = {
        {
                .ctl_name       = CTL_NET,
                .procname       = "net",
                .mode           = 0555,
                .child          = ipq_dir_table
        },
        { .ctl_name = 0 }
};

#ifdef CONFIG_PROC_FS
static int
ipq_get_info(char *buffer, char **start, off_t offset, int length)
{
        int len;

        read_lock_bh(&queue_lock);

        len = sprintf(buffer,
                      "Peer PID          : %d\n"
                      "Copy mode         : %hu\n"
                      "Copy range        : %u\n"
                      "Queue length      : %u\n"
                      "Queue max. length : %u\n"
                      "Queue dropped     : %u\n"
                      "Netlink dropped   : %u\n",
                      peer_pid,
                      copy_mode,
                      copy_range,
                      queue_total,
                      queue_maxlen,
                      queue_dropped,
                      queue_user_dropped);

        read_unlock_bh(&queue_lock);

        *start = buffer + offset;
        len -= offset;
        if (len > length)
                len = length;
        else if (len < 0)
                len = 0;
        return len;
}
#endif /* CONFIG_PROC_FS */

static struct nf_queue_handler nfqh = {
        .name   = "ip_queue",
        .outfn  = &ipq_enqueue_packet,
};

static int __init ip_queue_init(void)
{
        int status = -ENOMEM;
        struct proc_dir_entry *proc;

        netlink_register_notifier(&ipq_nl_notifier);
        ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
                                      THIS_MODULE);
        if (ipqnl == NULL) {
                printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

        proc = proc_net_create(IPQ_PROC_FS_NAME, 0, ipq_get_info);
        if (proc)
                proc->owner = THIS_MODULE;
        else {
                printk(KERN_ERR "ip_queue: failed to create proc entry\n");
                goto cleanup_ipqnl;
        }

        register_netdevice_notifier(&ipq_dev_notifier);
        ipq_sysctl_header = register_sysctl_table(ipq_root_table);

        status = nf_register_queue_handler(PF_INET, &nfqh);
        if (status < 0) {
                printk(KERN_ERR "ip_queue: failed to register queue handler\n");
                goto cleanup_sysctl;
        }
        return status;

cleanup_sysctl:
        unregister_sysctl_table(ipq_sysctl_header);
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(IPQ_PROC_FS_NAME);

cleanup_ipqnl:
        sock_release(ipqnl->sk_socket);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
        netlink_unregister_notifier(&ipq_nl_notifier);
        return status;
}

static void __exit ip_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        synchronize_net();
        ipq_flush(NF_DROP);

        unregister_sysctl_table(ipq_sysctl_header);
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(IPQ_PROC_FS_NAME);

        sock_release(ipqnl->sk_socket);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

        netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");

module_init(ip_queue_init);
module_exit(ip_queue_fini);