nfnetlink_queue.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that. Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
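
/* One nfqnl_instance exists per configured queue number. It records the
 * netlink portid of the userspace listener, the copy mode/range and the
 * list of packets currently awaiting a verdict.
 */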
struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	u32 peer_portid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;
	u_int32_t flags;			/* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
	spinlock_t	lock	____cacheline_aligned_in_smp;
	unsigned int	queue_total;
	unsigned int	id_sequence;		/* 'sequence' of pkt ids */
	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS	16
struct nfnl_queue_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
	return net_generic(net, nfnl_queue_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
	struct hlist_head *head;
	struct nfqnl_instance *inst;

	head = &q->instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}
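
/* Allocate and publish a new queue instance for @queue_num, owned by the
 * netlink socket identified by @portid. Fails with -EEXIST if the queue
 * number is already bound.
 */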
static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&q->instances_lock);
	if (instance_lookup(q, queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_portid = portid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = NFQNL_MAX_COPY_RANGE;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

	spin_unlock(&q->instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&q->instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
	spin_lock(&q->instances_lock);
	__instance_destroy(inst);
	spin_unlock(&q->instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_del(&entry->list);
	queue->queue_total--;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry)
		__dequeue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);

	return entry;
}

static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct nf_ct_hook *ct_hook;
	int err;

	if (verdict == NF_ACCEPT ||
	    verdict == NF_REPEAT ||
	    verdict == NF_STOP) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook) {
			err = ct_hook->update(entry->state.net, entry->skb);
			if (err < 0)
				verdict = NF_DROP;
		}
		rcu_read_unlock();
	}
	nf_reinject(entry, verdict);
}

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nfqnl_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
		      bool csum_verify)
{
	__u32 flags = 0;

	if (packet->ip_summed == CHECKSUM_PARTIAL)
		flags = NFQA_SKB_CSUMNOTREADY;
	else if (csum_verify)
		flags = NFQA_SKB_CSUM_NOTVERIFIED;

	if (skb_is_gso(packet))
		flags |= NFQA_SKB_GSO;

	return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
	const struct cred *cred;

	if (!sk_fullsock(sk))
		return 0;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		cred = sk->sk_socket->file->f_cred;
		if (nla_put_be32(skb, NFQA_UID,
		    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
			goto nla_put_failure;
		if (nla_put_be32(skb, NFQA_GID,
		    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
			goto nla_put_failure;
	}
	read_unlock_bh(&sk->sk_callback_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&sk->sk_callback_lock);
	return -1;
}

static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
	u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
	if (!skb || !sk_fullsock(skb->sk))
		return 0;

	read_lock_bh(&skb->sk->sk_callback_lock);

	if (skb->secmark)
		security_secid_to_secctx(skb->secmark, secdata, &seclen);

	read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
	return seclen;
}

static u32 nfqnl_get_bridge_size(struct nf_queue_entry *entry)
{
	struct sk_buff *entskb = entry->skb;
	u32 nlalen = 0;

	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
		return 0;

	if (skb_vlan_tag_present(entskb))
		nlalen += nla_total_size(nla_total_size(sizeof(__be16)) +
					 nla_total_size(sizeof(__be16)));

	if (entskb->network_header > entskb->mac_header)
		nlalen += nla_total_size((entskb->network_header -
					  entskb->mac_header));

	return nlalen;
}

static int nfqnl_put_bridge(struct nf_queue_entry *entry, struct sk_buff *skb)
{
	struct sk_buff *entskb = entry->skb;

	if (entry->state.pf != PF_BRIDGE || !skb_mac_header_was_set(entskb))
		return 0;

	if (skb_vlan_tag_present(entskb)) {
		struct nlattr *nest;

		nest = nla_nest_start(skb, NFQA_VLAN);
		if (!nest)
			goto nla_put_failure;

		if (nla_put_be16(skb, NFQA_VLAN_TCI, htons(entskb->vlan_tci)) ||
		    nla_put_be16(skb, NFQA_VLAN_PROTO, entskb->vlan_proto))
			goto nla_put_failure;

		nla_nest_end(skb, nest);
	}

	if (entskb->mac_header < entskb->network_header) {
		int len = (int)(entskb->network_header - entskb->mac_header);

		if (nla_put(skb, NFQA_L2HDR, len, skb_mac_header(entskb)))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}
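
/* Build the NFQNL_MSG_PACKET netlink message for one queued entry: packet
 * header, interface indexes, mark, hardware address, timestamp and, when
 * enabled via the queue flags, conntrack, uid/gid and security context
 * attributes, plus up to copy_range bytes of payload in NFQNL_COPY_PACKET
 * mode. Returns the new skb, or NULL on allocation/attribute failure.
 */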
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry,
			   __be32 **packet_id_ptr)
{
	size_t size;
	size_t data_len = 0, cap_len = 0;
	unsigned int hlen = 0;
	struct sk_buff *skb;
	struct nlattr *nla;
	struct nfqnl_msg_packet_hdr *pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo;
	struct nfnl_ct_hook *nfnl_ct;
	bool csum_verify;
	char *secdata = NULL;
	u32 seclen = 0;

	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(u_int32_t))	/* skbinfo */
		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */

	if (entskb->tstamp)
		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	size += nfqnl_get_bridge_size(entry);

	if (entry->state.hook <= NF_INET_FORWARD ||
	    (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
		csum_verify = !skb_csum_unnecessary(entskb);
	else
		csum_verify = false;

	outdev = entry->state.out;

	switch ((enum nfqnl_config_mode)READ_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = READ_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;

		hlen = skb_zerocopy_headlen(entskb);
		hlen = min_t(unsigned int, hlen, data_len);
		size += sizeof(struct nlattr) + hlen;
		cap_len = entskb->len;
		break;
	}

	nfnl_ct = rcu_dereference(nfnl_ct_hook);

	if (queue->flags & NFQA_CFG_F_CONNTRACK) {
		if (nfnl_ct != NULL) {
			ct = nfnl_ct->get_ct(entskb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}

	if (queue->flags & NFQA_CFG_F_UID_GID) {
		size += (nla_total_size(sizeof(u_int32_t))	/* uid */
			+ nla_total_size(sizeof(u_int32_t)));	/* gid */
	}

	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
		seclen = nfqnl_get_sk_secctx(entskb, &secdata);
		if (seclen)
			size += nla_total_size(seclen);
	}

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		skb_tx_error(entskb);
		goto nlmsg_failure;
	}

	nlh = nlmsg_put(skb, 0, 0,
			nfnl_msg_type(NFNL_SUBSYS_QUEUE, NFQNL_MSG_PACKET),
			sizeof(struct nfgenmsg), 0);
	if (!nlh) {
		skb_tx_error(entskb);
		kfree_skb(skb);
		goto nlmsg_failure;
	}
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = entry->state.pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
	pmsg = nla_data(nla);
	pmsg->hw_protocol	= entskb->protocol;
	pmsg->hook		= entry->state.hook;
	*packet_id_ptr		= &pmsg->packet_id;

	indev = entry->state.in;
	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physinif;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physinif = nf_bridge_get_physinif(entskb);
			if (physinif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
					 htonl(physinif)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (entry->state.pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by __nf_queue */
			    nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			int physoutif;

			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutif = nf_bridge_get_physoutif(entskb);
			if (physoutif &&
			    nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutif)))
				goto nla_put_failure;
		}
#endif
	}

	if (entskb->mark &&
	    nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
		goto nla_put_failure;

	if (indev && entskb->dev &&
	    skb_mac_header_was_set(entskb) &&
	    skb_mac_header_len(entskb) != 0) {
		struct nfqnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (nfqnl_put_bridge(entry, skb) < 0)
		goto nla_put_failure;

	if (entry->state.hook <= NF_INET_FORWARD && entskb->tstamp) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(entskb->tstamp);

		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
		goto nla_put_failure;

	if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(skb, ct, ctinfo, NFQA_CT, NFQA_CT_INFO) < 0)
		goto nla_put_failure;

	if (cap_len > data_len &&
	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
		goto nla_put_failure;

	if (nfqnl_put_packet_info(skb, entskb, csum_verify))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;

		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
			goto nla_put_failure;

		nla = skb_put(skb, sizeof(*nla));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = nla_attr_size(data_len);

		if (skb_zerocopy(skb, entskb, data_len, hlen))
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb->len;
	if (seclen)
		security_release_secctx(secdata, seclen);
	return skb;

nla_put_failure:
	skb_tx_error(entskb);
	kfree_skb(skb);
	net_err_ratelimited("nf_queue: error creating packet message\n");
nlmsg_failure:
	if (seclen)
		security_release_secctx(secdata, seclen);
	return NULL;
}

static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	static const unsigned long flags = IPS_CONFIRMED | IPS_DYING;
	const struct nf_conn *ct = (void *)skb_nfct(entry->skb);

	if (ct && ((ct->status & flags) == IPS_DYING))
		return true;
#endif
	return false;
}
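
/* Queue one entry to userspace: build the netlink message, assign a packet
 * id and unicast it to the bound portid. If the queue is full or delivery
 * fails, the packet is dropped unless NFQA_CFG_F_FAIL_OPEN is set, in which
 * case it is re-injected with an NF_ACCEPT verdict instead.
 */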
static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
		       struct nf_queue_entry *entry)
{
	struct sk_buff *nskb;
	int err = -ENOBUFS;
	__be32 *packet_id_ptr;
	int failopen = 0;

	nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
	if (nskb == NULL) {
		err = -ENOMEM;
		goto err_out;
	}
	spin_lock_bh(&queue->lock);

	if (nf_ct_drop_unconfirmed(entry))
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid);
	if (err < 0) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			failopen = 1;
			err = 0;
		} else {
			queue->queue_user_dropped++;
		}
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	if (failopen)
		nfqnl_reinject(entry, NF_ACCEPT);
err_out:
	return err;
}

static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);

	if (!entry)
		return NULL;

	if (nf_queue_entry_get_refs(entry))
		return entry;

	kfree(entry);
	return NULL;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
	if (nf_bridge_info_get(skb))
		__skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
	if (nf_bridge_info_get(skb))
		__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s)		do {} while (0)
#define nf_bridge_adjust_segmented_data(s)	do {} while (0)
#endif

static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
			   struct sk_buff *skb, struct nf_queue_entry *entry)
{
	int ret = -ENOMEM;
	struct nf_queue_entry *entry_seg;

	nf_bridge_adjust_segmented_data(skb);

	if (skb->next == NULL) { /* last packet, no need to copy entry */
		struct sk_buff *gso_skb = entry->skb;

		entry->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry);
		if (ret)
			entry->skb = gso_skb;
		return ret;
	}

	skb_mark_not_on_list(skb);

	entry_seg = nf_queue_entry_dup(entry);
	if (entry_seg) {
		entry_seg->skb = skb;
		ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
		if (ret)
			nf_queue_entry_free(entry_seg);
	}
	return ret;
}
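
/* Entry point from the nf_queue core. If the queue was configured without
 * NFQA_CFG_F_GSO, GSO skbs are software-segmented first and each segment is
 * enqueued as its own packet; otherwise the skb is passed through as-is.
 */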
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	unsigned int queued;
	struct nfqnl_instance *queue;
	struct sk_buff *skb, *segs, *nskb;
	int err = -ENOBUFS;
	struct net *net = entry->state.net;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	/* rcu_read_lock()ed by nf_hook_thresh */
	queue = instance_lookup(q, queuenum);
	if (!queue)
		return -ESRCH;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		return -EINVAL;

	skb = entry->skb;

	switch (entry->state.pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
		return __nfqnl_enqueue_packet(net, queue, entry);

	nf_bridge_adjust_skb_data(skb);
	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue. For instance, callers rely on -ESRCH to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR_OR_NULL(segs))
		goto out_err;

	queued = 0;
	err = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		if (err == 0)
			err = __nfqnl_enqueue_packet_gso(net, queue,
							 segs, entry);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
	}

	if (queued) {
		if (err) /* some segments are already queued */
			nf_queue_entry_free(entry);
		kfree_skb(skb);
		return 0;
	}
out_err:
	nf_bridge_adjust_segmented_data(skb);
	return err;
}
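
/* Replace the queued packet's payload with the data supplied in a verdict's
 * NFQA_PAYLOAD attribute, trimming or expanding the skb as needed and
 * invalidating its checksum state.
 */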
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
	struct sk_buff *nskb;

	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb)
				return -ENOMEM;
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (skb_ensure_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
			queue->copy_range = NFQNL_MAX_COPY_RANGE;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;
	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	int physinif, physoutif;

	physinif = nf_bridge_get_physinif(entry->skb);
	physoutif = nf_bridge_get_physoutif(entry->skb);

	if (physinif == ifindex || physoutif == ifindex)
		return 1;
#endif
	if (entry->state.in)
		if (entry->state.in->ifindex == ifindex)
			return 1;
	if (entry->state.out)
		if (entry->state.out->ifindex == ifindex)
			return 1;

	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
	int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static void nfqnl_nf_hook_drop(struct net *net)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int i;

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct nfqnl_instance *inst;
		struct hlist_head *head = &q->instance_table[i];

		hlist_for_each_entry_rcu(inst, head, hlist)
			nfqnl_flush(inst, NULL, 0);
	}
}

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock(&q->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &q->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock(&q->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_vlan_policy[NFQA_VLAN_MAX + 1] = {
	[NFQA_VLAN_TCI]		= { .type = NLA_U16},
	[NFQA_VLAN_PROTO]	= { .type = NLA_U16},
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
	[NFQA_CT]		= { .type = NLA_UNSPEC },
	[NFQA_EXP]		= { .type = NLA_UNSPEC },
	[NFQA_VLAN]		= { .type = NLA_NESTED },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
	struct nfqnl_instance *queue;

	queue = instance_lookup(q, queue_num);
	if (!queue)
		return ERR_PTR(-ENODEV);

	if (queue->peer_portid != nlportid)
		return ERR_PTR(-EPERM);

	return queue;
}

static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
	struct nfqnl_msg_verdict_hdr *vhdr;
	unsigned int verdict;

	if (!nfqa[NFQA_VERDICT_HDR])
		return NULL;

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
	if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
		return NULL;
	return vhdr;
}

static int nfq_id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}
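
/* NFQNL_MSG_VERDICT_BATCH handler: apply one verdict (and optional mark) to
 * every queued packet whose id is not after the id given in the verdict
 * header, using the wrap-safe comparison in nfq_id_after().
 */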
static int nfqnl_recv_verdict_batch(struct net *net, struct sock *ctnl,
				    struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    const struct nlattr * const nfqa[],
				    struct netlink_ext_ack *extack)
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_queue_entry *entry, *tmp;
	unsigned int verdict, maxid;
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	LIST_HEAD(batch_list);
	u16 queue_num = ntohs(nfmsg->res_id);
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);
	maxid = ntohl(vhdr->id);

	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}

	spin_unlock_bh(&queue->lock);

	if (list_empty(&batch_list))
		return -ENOENT;

	list_for_each_entry_safe(entry, tmp, &batch_list, list) {
		if (nfqa[NFQA_MARK])
			entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

		nfqnl_reinject(entry, verdict);
	}
	return 0;
}

static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
				      const struct nlmsghdr *nlh,
				      const struct nlattr * const nfqa[],
				      struct nf_queue_entry *entry,
				      enum ip_conntrack_info *ctinfo)
{
	struct nf_conn *ct;

	ct = nfnl_ct->get_ct(entry->skb, ctinfo);
	if (ct == NULL)
		return NULL;

	if (nfnl_ct->parse(nfqa[NFQA_CT], ct) < 0)
		return NULL;

	if (nfqa[NFQA_EXP])
		nfnl_ct->attach_expect(nfqa[NFQA_EXP], ct,
				       NETLINK_CB(entry->skb).portid,
				       nlmsg_report(nlh));
	return ct;
}

static int nfqa_parse_bridge(struct nf_queue_entry *entry,
			     const struct nlattr * const nfqa[])
{
	if (nfqa[NFQA_VLAN]) {
		struct nlattr *tb[NFQA_VLAN_MAX + 1];
		int err;

		err = nla_parse_nested_deprecated(tb, NFQA_VLAN_MAX,
						  nfqa[NFQA_VLAN],
						  nfqa_vlan_policy, NULL);
		if (err < 0)
			return err;

		if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
			return -EINVAL;

		__vlan_hwaccel_put_tag(entry->skb,
			nla_get_be16(tb[NFQA_VLAN_PROTO]),
			ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
	}

	if (nfqa[NFQA_L2HDR]) {
		int mac_header_len = entry->skb->network_header -
			entry->skb->mac_header;

		if (mac_header_len != nla_len(nfqa[NFQA_L2HDR]))
			return -EINVAL;
		else if (mac_header_len > 0)
			memcpy(skb_mac_header(entry->skb),
			       nla_data(nfqa[NFQA_L2HDR]),
			       mac_header_len);
	}

	return 0;
}

static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl,
			      struct sk_buff *skb,
			      const struct nlmsghdr *nlh,
			      const struct nlattr * const nfqa[],
			      struct netlink_ext_ack *extack)
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	enum ip_conntrack_info ctinfo;
	struct nfnl_ct_hook *nfnl_ct;
	struct nf_conn *ct = NULL;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	int err;

	queue = verdict_instance_lookup(q, queue_num,
					NETLINK_CB(skb).portid);
	if (IS_ERR(queue))
		return PTR_ERR(queue);

	vhdr = verdicthdr_get(nfqa);
	if (!vhdr)
		return -EINVAL;

	verdict = ntohl(vhdr->verdict);

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;

	/* rcu lock already held from nfnl->call_rcu. */
	nfnl_ct = rcu_dereference(nfnl_ct_hook);

	if (nfqa[NFQA_CT]) {
		if (nfnl_ct != NULL)
			ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo);
	}

	if (entry->state.pf == PF_BRIDGE) {
		err = nfqa_parse_bridge(entry, nfqa);
		if (err < 0)
			return err;
	}

	if (nfqa[NFQA_PAYLOAD]) {
		u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
		int diff = payload_len - entry->skb->len;

		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 payload_len, entry, diff) < 0)
			verdict = NF_DROP;

		if (ct && diff)
			nfnl_ct->seq_adjust(entry->skb, ct, ctinfo, diff);
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nfqnl_reinject(entry, verdict);
	return 0;
}

static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
			     struct sk_buff *skb, const struct nlmsghdr *nlh,
			     const struct nlattr * const nfqa[],
			     struct netlink_ext_ack *extack)
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
	[NFQA_CFG_QUEUE_MAXLEN]	= { .type = NLA_U32 },
	[NFQA_CFG_MASK]		= { .type = NLA_U32 },
	[NFQA_CFG_FLAGS]	= { .type = NLA_U32 },
};

static const struct nf_queue_handler nfqh = {
	.outfn		= nfqnl_enqueue_packet,
	.nf_hook_drop	= nfqnl_nf_hook_drop,
};
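
/* NFQNL_MSG_CONFIG handler: bind/unbind a queue number to the sending
 * netlink portid and update per-queue parameters (copy mode/range, maximum
 * queue length and NFQA_CFG_FLAGS, which require an accompanying mask).
 */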
static int nfqnl_recv_config(struct net *net, struct sock *ctnl,
			     struct sk_buff *skb, const struct nlmsghdr *nlh,
			     const struct nlattr * const nfqa[],
			     struct netlink_ext_ack *extack)
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	__u32 flags = 0, mask = 0;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Obsolete commands without queue context */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND: return 0;
		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
		}
	}

	/* Check if we support these flags in first place, dependencies should
	 * be there too not to break atomicity.
	 */
	if (nfqa[NFQA_CFG_FLAGS]) {
		if (!nfqa[NFQA_CFG_MASK]) {
			/* A mask is needed to specify which flags are being
			 * changed.
			 */
			return -EINVAL;
		}

		flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
		mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

		if (flags >= NFQA_CFG_F_MAX)
			return -EOPNOTSUPP;

#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
		if (flags & mask & NFQA_CFG_F_SECCTX)
			return -EOPNOTSUPP;
#endif
		if ((flags & mask & NFQA_CFG_F_CONNTRACK) &&
		    !rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
			nfnl_unlock(NFNL_SUBSYS_QUEUE);
			request_module("ip_conntrack_netlink");
			nfnl_lock(NFNL_SUBSYS_QUEUE);
			if (rcu_access_pointer(nfnl_ct_hook))
				return -EAGAIN;
#endif
			return -EOPNOTSUPP;
		}
	}

	rcu_read_lock();
	queue = instance_lookup(q, queue_num);
	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(q, queue_num,
						NETLINK_CB(skb).portid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(q, queue);
			goto err_out_unlock;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			goto err_out_unlock;
		}
	}

	if (!queue) {
		ret = -ENODEV;
		goto err_out_unlock;
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params =
			nla_data(nfqa[NFQA_CFG_PARAMS]);

		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);

		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

	if (nfqa[NFQA_CFG_FLAGS]) {
		spin_lock_bh(&queue->lock);
		queue->flags &= ~mask;
		queue->flags |= flags & mask;
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call_rcu = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call_rcu = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
	[NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};
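
/* /proc/net/netfilter/nfnetlink_queue: one line per queue instance showing
 * queue number, peer portid, queue length, copy mode/range, drop counters
 * and the current packet id sequence.
 */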
#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;
	struct net *net;
	struct nfnl_queue_net *q;

	if (!st)
		return NULL;

	net = seq_file_net(seq);
	q = nfnl_queue_pernet(net);
	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&q->instance_table[st->bucket]))
			return q->instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	h = h->next;
	while (!h) {
		struct nfnl_queue_net *q;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		q = nfnl_queue_pernet(net);
		h = q->instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;

	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
	return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
		   inst->queue_num,
		   inst->peer_portid, inst->queue_total,
		   inst->copy_mode, inst->copy_range,
		   inst->queue_dropped, inst->queue_user_dropped,
		   inst->id_sequence, 1);
	return 0;
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};
#endif /* PROC_FS */

static int __net_init nfnl_queue_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&q->instance_table[i]);

	spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("nfnetlink_queue", 0440, net->nf.proc_netfilter,
			&nfqnl_seq_ops, sizeof(struct iter_state)))
		return -ENOMEM;
#endif
	nf_register_queue_handler(net, &nfqh);
	return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
	unsigned int i;

	nf_unregister_queue_handler(net);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
	for (i = 0; i < INSTANCE_BUCKETS; i++)
		WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
}

static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
{
	synchronize_rcu();
}

static struct pernet_operations nfnl_queue_net_ops = {
	.init		= nfnl_queue_net_init,
	.exit		= nfnl_queue_net_exit,
	.exit_batch	= nfnl_queue_net_exit_batch,
	.id		= &nfnl_queue_net_id,
	.size		= sizeof(struct nfnl_queue_net),
};

static int __init nfnetlink_queue_init(void)
{
	int status;

	status = register_pernet_subsys(&nfnl_queue_net_ops);
	if (status < 0) {
		pr_err("failed to register pernet ops\n");
		goto out;
	}

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		pr_err("failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = register_netdevice_notifier(&nfqnl_dev_notifier);
	if (status < 0) {
		pr_err("failed to register netdevice notifier\n");
		goto cleanup_netlink_subsys;
	}

	return status;

cleanup_netlink_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);
out:
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_queue_net_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);