// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
#include <net/gue.h>
#include <net/fou.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <uapi/linux/fou.h>
#include <uapi/linux/genetlink.h>
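
/* Foo-over-UDP (FOU) encapsulates an IP protocol directly inside a UDP
 * payload; GUE (Generic UDP Encapsulation) additionally inserts its own
 * header between the UDP header and the inner packet. Receive ports are
 * configured through the "fou" generic netlink family defined below.
 * Illustrative userspace usage via iproute2 (exact flags may vary by
 * iproute2 version):
 *
 *	ip fou add port 5555 ipproto 47		// direct FOU, inner proto GRE
 *	ip fou add port 5556 gue		// GUE decapsulation on 5556
 */
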
struct fou {
	struct socket *sock;
	u8 protocol;
	u8 flags;
	__be16 port;
	u8 family;
	u16 type;
	struct list_head list;
	struct rcu_head rcu;
};

#define FOU_F_REMCSUM_NOPARTIAL BIT(0)

struct fou_cfg {
	u16 type;
	u8 protocol;
	u8 flags;
	struct udp_port_cfg udp_config;
};

static unsigned int fou_net_id;

struct fou_net {
	struct list_head fou_list;
	struct mutex fou_lock;
};

static inline struct fou *fou_from_sock(struct sock *sk)
{
	return sk->sk_user_data;
}

static int fou_recv_pull(struct sk_buff *skb, struct fou *fou, size_t len)
{
	/* Remove 'len' bytes from the packet (UDP header and
	 * FOU header if present).
	 */
	if (fou->family == AF_INET)
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
	else
		ipv6_hdr(skb)->payload_len =
		    htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, udp_hdr(skb), len);
	skb_reset_transport_header(skb);
	return iptunnel_pull_offloads(skb);
}

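/* UDP encap_rcv() return convention (see the encap_rcv handling in
 * net/ipv4/udp.c): 0 means the skb was consumed, > 0 means fall through to
 * normal UDP processing, and < 0 asks the stack to resubmit the packet as IP
 * protocol -retval. So returning 1 below when no fou state is attached lets
 * plain UDP traffic through, while returning -fou->protocol hands the
 * decapsulated packet straight to the inner protocol's handler.
 */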
static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);

	if (!fou)
		return 1;

	if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
		goto drop;

	return -fou->protocol;

drop:
	kfree_skb(skb);
	return 0;
}

static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
				  void *data, size_t hdrlen, u8 ipproto,
				  bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);
	size_t plen = sizeof(struct udphdr) + hdrlen +
	    max_t(size_t, offset + sizeof(u16), start);

	if (skb->remcsum_offload)
		return guehdr;

	if (!pskb_may_pull(skb, plen))
		return NULL;
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	skb_remcsum_process(skb, (void *)guehdr + hdrlen,
			    start, offset, nopartial);

	return guehdr;
}

static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr)
{
	/* No support yet */
	kfree_skb(skb);
	return 0;
}

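/* GUE version 0 header layout (struct guehdr in include/net/gue.h), sketched
 * here for reference; hlen counts the optional fields in 32-bit words:
 *
 *	 0                   1                   2                   3
 *	 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|Ver|C|  Hlen   |  Proto/ctype  |             Flags             |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *	|                Optional fields (hlen * 4 bytes)               |
 *	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * A "version" of 1 is overloaded to mean the UDP payload is a bare IPv4 or
 * IPv6 packet: the two version bits then land on the top of the IP version
 * nibble (4 or 6), which is why the code below peeks at iphdr->version.
 */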
static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
{
	struct fou *fou = fou_from_sock(sk);
	size_t len, optlen, hdrlen;
	struct guehdr *guehdr;
	void *data;
	u16 doffset = 0;
	u8 proto_ctype;

	if (!fou)
		return 1;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, len))
		goto drop;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	switch (guehdr->version) {
	case 0: /* Full GUE header present */
		break;

	case 1: {
		/* Direct encapsulation of IPv4 or IPv6 */
		int prot;

		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			prot = IPPROTO_IPIP;
			break;
		case 6:
			prot = IPPROTO_IPV6;
			break;
		default:
			goto drop;
		}

		if (fou_recv_pull(skb, fou, sizeof(struct udphdr)))
			goto drop;

		return -prot;
	}

	default: /* Undefined version */
		goto drop;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (!pskb_may_pull(skb, len))
		goto drop;

	/* guehdr may change after pull */
	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	if (validate_gue_flags(guehdr, optlen))
		goto drop;

	hdrlen = sizeof(struct guehdr) + optlen;

	if (fou->family == AF_INET)
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len);
	else
		ipv6_hdr(skb)->payload_len =
		    htons(ntohs(ipv6_hdr(skb)->payload_len) - len);

	/* Pull csum through the guehdr now.  This can be used if
	 * there is a remote checksum offload.
	 */
	skb_postpull_rcsum(skb, udp_hdr(skb), len);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_remcsum(skb, guehdr, data + doffset,
					     hdrlen, guehdr->proto_ctype,
					     !!(fou->flags &
						FOU_F_REMCSUM_NOPARTIAL));
			if (!guehdr)
				goto drop;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	if (unlikely(guehdr->control))
		return gue_control_message(skb, guehdr);

	proto_ctype = guehdr->proto_ctype;
	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
	skb_reset_transport_header(skb);

	if (iptunnel_pull_offloads(skb))
		goto drop;

	return -proto_ctype;

drop:
	kfree_skb(skb);
	return 0;
}

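/* The gro_receive/gro_complete callbacks below are wired into the UDP socket
 * by setup_udp_tunnel_sock() in fou_create(), so udp_gro_receive() invokes
 * them for packets arriving on a FOU port. Dispatching to the inner
 * protocol's offload ops lets GRO aggregate, e.g., TCP segments across the
 * UDP tunnel instead of stopping at the outer UDP header.
 */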
static struct sk_buff *fou_gro_receive(struct sock *sk,
				       struct list_head *head,
				       struct sk_buff *skb)
{
	u8 proto = fou_from_sock(sk)->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;

	/* We can clear the encap_mark for FOU as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the FOU header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

	return pp;
}

static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
			    int nhoff)
{
	const struct net_offload *ops;
	u8 proto = fou_from_sock(sk)->protocol;
	int err = -ENOSYS;
	const struct net_offload **offloads;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

	skb_set_inner_mac_header(skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
				      struct guehdr *guehdr, void *data,
				      size_t hdrlen, struct gro_remcsum *grc,
				      bool nopartial)
{
	__be16 *pd = data;
	size_t start = ntohs(pd[0]);
	size_t offset = ntohs(pd[1]);

	if (skb->remcsum_offload)
		return guehdr;

	if (!NAPI_GRO_CB(skb)->csum_valid)
		return NULL;

	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
					 start, offset, grc, nopartial);

	skb->remcsum_offload = 1;

	return guehdr;
}

static struct sk_buff *gue_gro_receive(struct sock *sk,
				       struct list_head *head,
				       struct sk_buff *skb)
{
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct guehdr *guehdr;
	size_t len, optlen, hdrlen, off;
	void *data;
	u16 doffset = 0;
	int flush = 1;
	struct fou *fou = fou_from_sock(sk);
	struct gro_remcsum grc;
	u8 proto;

	skb_gro_remcsum_init(&grc);

	off = skb_gro_offset(skb);
	len = off + sizeof(*guehdr);

	guehdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	switch (guehdr->version) {
	case 0:
		break;
	case 1:
		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			proto = IPPROTO_IPIP;
			break;
		case 6:
			proto = IPPROTO_IPV6;
			break;
		default:
			goto out;
		}
		goto next_proto;
	default:
		goto out;
	}

	optlen = guehdr->hlen << 2;
	len += optlen;

	if (skb_gro_header_hard(skb, len)) {
		guehdr = skb_gro_header_slow(skb, len, off);
		if (unlikely(!guehdr))
			goto out;
	}

	if (unlikely(guehdr->control) || guehdr->version != 0 ||
	    validate_gue_flags(guehdr, optlen))
		goto out;

	hdrlen = sizeof(*guehdr) + optlen;

	/* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr;
	 * this is needed if there is a remote checksum offload.
	 */
	skb_gro_postpull_rcsum(skb, guehdr, hdrlen);

	data = &guehdr[1];

	if (guehdr->flags & GUE_FLAG_PRIV) {
		__be32 flags = *(__be32 *)(data + doffset);

		doffset += GUE_LEN_PRIV;

		if (flags & GUE_PFLAG_REMCSUM) {
			guehdr = gue_gro_remcsum(skb, off, guehdr,
						 data + doffset, hdrlen, &grc,
						 !!(fou->flags &
						    FOU_F_REMCSUM_NOPARTIAL));

			if (!guehdr)
				goto out;

			data = &guehdr[1];

			doffset += GUE_PLEN_REMCSUM;
		}
	}

	skb_gro_pull(skb, hdrlen);

	list_for_each_entry(p, head, list) {
		const struct guehdr *guehdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		guehdr2 = (struct guehdr *)(p->data + off);

		/* Compare base GUE header to be equal (covers
		 * hlen, version, proto_ctype, and flags).
		 */
		if (guehdr->word != guehdr2->word) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Check that the optional fields are the same. */
		if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
					   guehdr->hlen << 2)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	proto = guehdr->proto_ctype;

next_proto:

	/* We can clear the encap_mark for GUE as we are essentially doing
	 * one of two possible things.  We are either adding an L4 tunnel
	 * header to the outer L3 tunnel header, or we are simply
	 * treating the GUE header as though it is a UDP protocol
	 * specific header such as VXLAN or GENEVE.
	 */
	NAPI_GRO_CB(skb)->encap_mark = 0;

	/* Flag this frame as already having an outer encap header */
	NAPI_GRO_CB(skb)->is_fou = 1;

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
		goto out_unlock;

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);

	return pp;
}

static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
	const struct net_offload **offloads;
	struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	unsigned int guehlen = 0;
	u8 proto;
	int err = -ENOENT;

	switch (guehdr->version) {
	case 0:
		proto = guehdr->proto_ctype;
		guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
		break;
	case 1:
		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			proto = IPPROTO_IPIP;
			break;
		case 6:
			proto = IPPROTO_IPV6;
			break;
		default:
			return err;
		}
		break;
	default:
		return err;
	}

	rcu_read_lock();
	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
	ops = rcu_dereference(offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff + guehlen);

	skb_set_inner_mac_header(skb, nhoff + guehlen);

out_unlock:
	rcu_read_unlock();
	return err;
}

static bool fou_cfg_cmp(struct fou *fou, struct fou_cfg *cfg)
{
	struct sock *sk = fou->sock->sk;
	struct udp_port_cfg *udp_cfg = &cfg->udp_config;

	if (fou->family != udp_cfg->family ||
	    fou->port != udp_cfg->local_udp_port ||
	    sk->sk_dport != udp_cfg->peer_udp_port ||
	    sk->sk_bound_dev_if != udp_cfg->bind_ifindex)
		return false;

	if (fou->family == AF_INET) {
		if (sk->sk_rcv_saddr != udp_cfg->local_ip.s_addr ||
		    sk->sk_daddr != udp_cfg->peer_ip.s_addr)
			return false;
		else
			return true;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (ipv6_addr_cmp(&sk->sk_v6_rcv_saddr, &udp_cfg->local_ip6) ||
		    ipv6_addr_cmp(&sk->sk_v6_daddr, &udp_cfg->peer_ip6))
			return false;
		else
			return true;
#endif
	}

	return false;
}

static int fou_add_to_port_list(struct net *net, struct fou *fou,
				struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou_cfg_cmp(fout, cfg)) {
			mutex_unlock(&fn->fou_lock);
			return -EALREADY;
		}
	}

	list_add(&fou->list, &fn->fou_list);
	mutex_unlock(&fn->fou_lock);

	return 0;
}

static void fou_release(struct fou *fou)
{
	struct socket *sock = fou->sock;

	list_del(&fou->list);
	udp_tunnel_sock_release(sock);

	kfree_rcu(fou, rcu);
}

static int fou_create(struct net *net, struct fou_cfg *cfg,
		      struct socket **sockp)
{
	struct socket *sock = NULL;
	struct fou *fou = NULL;
	struct sock *sk;
	struct udp_tunnel_sock_cfg tunnel_cfg;
	int err;

	/* Open UDP socket */
	err = udp_sock_create(net, &cfg->udp_config, &sock);
	if (err < 0)
		goto error;

	/* Allocate FOU port structure */
	fou = kzalloc(sizeof(*fou), GFP_KERNEL);
	if (!fou) {
		err = -ENOMEM;
		goto error;
	}

	sk = sock->sk;

	fou->port = cfg->udp_config.local_udp_port;
	fou->family = cfg->udp_config.family;
	fou->flags = cfg->flags;
	fou->type = cfg->type;
	fou->sock = sock;

	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.sk_user_data = fou;
	tunnel_cfg.encap_destroy = NULL;

	/* Initialize the receive callbacks based on the fou type */
	switch (cfg->type) {
	case FOU_ENCAP_DIRECT:
		tunnel_cfg.encap_rcv = fou_udp_recv;
		tunnel_cfg.gro_receive = fou_gro_receive;
		tunnel_cfg.gro_complete = fou_gro_complete;
		fou->protocol = cfg->protocol;
		break;
	case FOU_ENCAP_GUE:
		tunnel_cfg.encap_rcv = gue_udp_recv;
		tunnel_cfg.gro_receive = gue_gro_receive;
		tunnel_cfg.gro_complete = gue_gro_complete;
		break;
	default:
		err = -EINVAL;
		goto error;
	}

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);

	sk->sk_allocation = GFP_ATOMIC;

	err = fou_add_to_port_list(net, fou, cfg);
	if (err)
		goto error;

	if (sockp)
		*sockp = sock;

	return 0;

error:
	kfree(fou);
	if (sock)
		udp_tunnel_sock_release(sock);

	return err;
}

static int fou_destroy(struct net *net, struct fou_cfg *cfg)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	int err = -EINVAL;
	struct fou *fou;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fou, &fn->fou_list, list) {
		if (fou_cfg_cmp(fou, cfg)) {
			fou_release(fou);
			err = 0;
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);

	return err;
}

static struct genl_family fou_nl_family;

static const struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
	[FOU_ATTR_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_AF] = { .type = NLA_U8, },
	[FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
	[FOU_ATTR_TYPE] = { .type = NLA_U8, },
	[FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, },
	[FOU_ATTR_LOCAL_V4] = { .type = NLA_U32, },
	[FOU_ATTR_PEER_V4] = { .type = NLA_U32, },
	[FOU_ATTR_LOCAL_V6] = { .len = sizeof(struct in6_addr), },
	[FOU_ATTR_PEER_V6] = { .len = sizeof(struct in6_addr), },
	[FOU_ATTR_PEER_PORT] = { .type = NLA_U16, },
	[FOU_ATTR_IFINDEX] = { .type = NLA_S32, },
};

static int parse_nl_config(struct genl_info *info,
			   struct fou_cfg *cfg)
{
	bool has_local = false, has_peer = false;
	struct nlattr *attr;
	int ifindex;
	__be16 port;

	memset(cfg, 0, sizeof(*cfg));

	cfg->udp_config.family = AF_INET;

	if (info->attrs[FOU_ATTR_AF]) {
		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);

		switch (family) {
		case AF_INET:
			break;
		case AF_INET6:
			cfg->udp_config.ipv6_v6only = 1;
			break;
		default:
			return -EAFNOSUPPORT;
		}

		cfg->udp_config.family = family;
	}

	if (info->attrs[FOU_ATTR_PORT]) {
		port = nla_get_be16(info->attrs[FOU_ATTR_PORT]);
		cfg->udp_config.local_udp_port = port;
	}

	if (info->attrs[FOU_ATTR_IPPROTO])
		cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);

	if (info->attrs[FOU_ATTR_TYPE])
		cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);

	if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL])
		cfg->flags |= FOU_F_REMCSUM_NOPARTIAL;

	if (cfg->udp_config.family == AF_INET) {
		if (info->attrs[FOU_ATTR_LOCAL_V4]) {
			attr = info->attrs[FOU_ATTR_LOCAL_V4];
			cfg->udp_config.local_ip.s_addr = nla_get_in_addr(attr);
			has_local = true;
		}

		if (info->attrs[FOU_ATTR_PEER_V4]) {
			attr = info->attrs[FOU_ATTR_PEER_V4];
			cfg->udp_config.peer_ip.s_addr = nla_get_in_addr(attr);
			has_peer = true;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (info->attrs[FOU_ATTR_LOCAL_V6]) {
			attr = info->attrs[FOU_ATTR_LOCAL_V6];
			cfg->udp_config.local_ip6 = nla_get_in6_addr(attr);
			has_local = true;
		}

		if (info->attrs[FOU_ATTR_PEER_V6]) {
			attr = info->attrs[FOU_ATTR_PEER_V6];
			cfg->udp_config.peer_ip6 = nla_get_in6_addr(attr);
			has_peer = true;
		}
#endif
	}

	if (has_peer) {
		if (info->attrs[FOU_ATTR_PEER_PORT]) {
			port = nla_get_be16(info->attrs[FOU_ATTR_PEER_PORT]);
			cfg->udp_config.peer_udp_port = port;
		} else {
			return -EINVAL;
		}
	}

	if (info->attrs[FOU_ATTR_IFINDEX]) {
		if (!has_local)
			return -EINVAL;

		ifindex = nla_get_s32(info->attrs[FOU_ATTR_IFINDEX]);

		cfg->udp_config.bind_ifindex = ifindex;
	}

	return 0;
}

static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_create(net, &cfg, NULL);
}

static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_cfg cfg;
	int err;

	err = parse_nl_config(info, &cfg);
	if (err)
		return err;

	return fou_destroy(net, &cfg);
}

static int fou_fill_info(struct fou *fou, struct sk_buff *msg)
{
	struct sock *sk = fou->sock->sk;

	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
	    nla_put_be16(msg, FOU_ATTR_PORT, fou->port) ||
	    nla_put_be16(msg, FOU_ATTR_PEER_PORT, sk->sk_dport) ||
	    nla_put_u8(msg, FOU_ATTR_IPPROTO, fou->protocol) ||
	    nla_put_u8(msg, FOU_ATTR_TYPE, fou->type) ||
	    nla_put_s32(msg, FOU_ATTR_IFINDEX, sk->sk_bound_dev_if))
		return -1;

	if (fou->flags & FOU_F_REMCSUM_NOPARTIAL)
		if (nla_put_flag(msg, FOU_ATTR_REMCSUM_NOPARTIAL))
			return -1;

	if (fou->sock->sk->sk_family == AF_INET) {
		if (nla_put_in_addr(msg, FOU_ATTR_LOCAL_V4, sk->sk_rcv_saddr))
			return -1;

		if (nla_put_in_addr(msg, FOU_ATTR_PEER_V4, sk->sk_daddr))
			return -1;
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (nla_put_in6_addr(msg, FOU_ATTR_LOCAL_V6,
				     &sk->sk_v6_rcv_saddr))
			return -1;

		if (nla_put_in6_addr(msg, FOU_ATTR_PEER_V6, &sk->sk_v6_daddr))
			return -1;
#endif
	}

	return 0;
}

static int fou_dump_info(struct fou *fou, u32 portid, u32 seq,
			 u32 flags, struct sk_buff *skb, u8 cmd)
{
	void *hdr;

	hdr = genlmsg_put(skb, portid, seq, &fou_nl_family, flags, cmd);
	if (!hdr)
		return -ENOMEM;

	if (fou_fill_info(fou, skb) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int fou_nl_cmd_get_port(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct sk_buff *msg;
	struct fou_cfg cfg;
	struct fou *fout;
	__be16 port;
	u8 family;
	int ret;

	ret = parse_nl_config(info, &cfg);
	if (ret)
		return ret;
	port = cfg.udp_config.local_udp_port;
	if (port == 0)
		return -EINVAL;

	family = cfg.udp_config.family;
	if (family != AF_INET && family != AF_INET6)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	ret = -ESRCH;
	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (fou_cfg_cmp(fout, &cfg)) {
			ret = fou_dump_info(fout, info->snd_portid,
					    info->snd_seq, 0, msg,
					    info->genlhdr->cmd);
			break;
		}
	}
	mutex_unlock(&fn->fou_lock);
	if (ret < 0)
		goto out_free;

	return genlmsg_reply(msg, info);

out_free:
	nlmsg_free(msg);
	return ret;
}

static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fout;
	int idx = 0, ret;

	mutex_lock(&fn->fou_lock);
	list_for_each_entry(fout, &fn->fou_list, list) {
		if (idx++ < cb->args[0])
			continue;
		ret = fou_dump_info(fout, NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq, NLM_F_MULTI,
				    skb, FOU_CMD_GET);
		if (ret)
			break;
	}
	mutex_unlock(&fn->fou_lock);

	cb->args[0] = idx;
	return skb->len;
}

static const struct genl_small_ops fou_nl_ops[] = {
	{
		.cmd = FOU_CMD_ADD,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = fou_nl_cmd_add_port,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = fou_nl_cmd_rm_port,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = FOU_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = fou_nl_cmd_get_port,
		.dumpit = fou_nl_dump,
	},
};

static struct genl_family fou_nl_family __ro_after_init = {
	.hdrsize = 0,
	.name = FOU_GENL_NAME,
	.version = FOU_GENL_VERSION,
	.maxattr = FOU_ATTR_MAX,
	.policy = fou_nl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = fou_nl_ops,
	.n_small_ops = ARRAY_SIZE(fou_nl_ops),
};

size_t fou_encap_hlen(struct ip_tunnel_encap *e)
{
	return sizeof(struct udphdr);
}
EXPORT_SYMBOL(fou_encap_hlen);

size_t gue_encap_hlen(struct ip_tunnel_encap *e)
{
	size_t len;
	bool need_priv = false;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);

	if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) {
		len += GUE_PLEN_REMCSUM;
		need_priv = true;
	}

	len += need_priv ? GUE_LEN_PRIV : 0;

	return len;
}
EXPORT_SYMBOL(gue_encap_hlen);

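/* fou_encap_hlen()/gue_encap_hlen() above report the extra header bytes that
 * encapsulation adds; the ip_tunnel core consults the encap_hlen callback
 * when sizing headroom and computing tunnel MTU, so they must stay consistent
 * with what the build_header paths below actually push onto the skb.
 */
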
int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	int err;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	return 0;
}
EXPORT_SYMBOL(__fou_build_header);

int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
		       u8 *protocol, __be16 *sport, int type)
{
	struct guehdr *guehdr;
	size_t hdrlen, optlen = 0;
	void *data;
	bool need_priv = false;
	int err;

	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
	    skb->ip_summed == CHECKSUM_PARTIAL) {
		optlen += GUE_PLEN_REMCSUM;
		type |= SKB_GSO_TUNNEL_REMCSUM;
		need_priv = true;
	}

	optlen += need_priv ? GUE_LEN_PRIV : 0;

	err = iptunnel_handle_offloads(skb, type);
	if (err)
		return err;

	/* Get source port (based on flow hash) before skb_push */
	*sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
						skb, 0, 0, false);

	hdrlen = sizeof(struct guehdr) + optlen;

	skb_push(skb, hdrlen);

	guehdr = (struct guehdr *)skb->data;

	guehdr->control = 0;
	guehdr->version = 0;
	guehdr->hlen = optlen >> 2;
	guehdr->flags = 0;
	guehdr->proto_ctype = *protocol;

	data = &guehdr[1];

	if (need_priv) {
		__be32 *flags = data;

		guehdr->flags |= GUE_FLAG_PRIV;
		*flags = 0;
		data += GUE_LEN_PRIV;

		if (type & SKB_GSO_TUNNEL_REMCSUM) {
			u16 csum_start = skb_checksum_start_offset(skb);
			__be16 *pd = data;

			if (csum_start < hdrlen)
				return -EINVAL;

			csum_start -= hdrlen;
			pd[0] = htons(csum_start);
			pd[1] = htons(csum_start + skb->csum_offset);

			if (!skb_is_gso(skb)) {
				skb->ip_summed = CHECKSUM_NONE;
				skb->encapsulation = 0;
			}

			*flags |= GUE_PFLAG_REMCSUM;
			data += GUE_PLEN_REMCSUM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(__gue_build_header);

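/* With CONFIG_NET_FOU_IP_TUNNELS, the section below registers FOU and GUE as
 * ip_tunnel encapsulation ops, which is what lets ipip/sit/gre tunnels be
 * configured with "encap fou" or "encap gue". An illustrative iproute2 setup
 * (addresses, ports, and exact flag syntax are examples only):
 *
 *	ip fou add port 5555 ipproto 4
 *	ip link add name tun1 type ipip remote 192.168.1.1 \
 *		encap fou encap-sport auto encap-dport 5555
 */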
#ifdef CONFIG_NET_FOU_IP_TUNNELS

static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e,
			  struct flowi4 *fl4, u8 *protocol, __be16 sport)
{
	struct udphdr *uh;

	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);

	uh = udp_hdr(skb);

	uh->dest = e->dport;
	uh->source = sport;
	uh->len = htons(skb->len);
	udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
		     fl4->saddr, fl4->daddr, skb->len);

	*protocol = IPPROTO_UDP;
}

static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __fou_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}

static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
			    u8 *protocol, struct flowi4 *fl4)
{
	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
						       SKB_GSO_UDP_TUNNEL;
	__be16 sport;
	int err;

	err = __gue_build_header(skb, e, protocol, &sport, type);
	if (err)
		return err;

	fou_build_udp(skb, e, fl4, protocol, sport);

	return 0;
}

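/* ICMP errors for the outer UDP flow arrive via the encap err_handler; both
 * the fou and gue encap ops below use gue_err(), which peels back to the
 * inner header and forwards the error to the inner protocol's err_handler so
 * that, e.g., PMTU information reaches the tunnel.
 */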
static int gue_err_proto_handler(int proto, struct sk_buff *skb, u32 info)
{
	const struct net_protocol *ipprot = rcu_dereference(inet_protos[proto]);

	if (ipprot && ipprot->err_handler) {
		if (!ipprot->err_handler(skb, info))
			return 0;
	}

	return -ENOENT;
}

static int gue_err(struct sk_buff *skb, u32 info)
{
	int transport_offset = skb_transport_offset(skb);
	struct guehdr *guehdr;
	size_t len, optlen;
	int ret;

	len = sizeof(struct udphdr) + sizeof(struct guehdr);
	if (!pskb_may_pull(skb, transport_offset + len))
		return -EINVAL;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];

	switch (guehdr->version) {
	case 0: /* Full GUE header present */
		break;
	case 1: {
		/* Direct encapsulation of IPv4 or IPv6 */
		skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));

		switch (((struct iphdr *)guehdr)->version) {
		case 4:
			ret = gue_err_proto_handler(IPPROTO_IPIP, skb, info);
			goto out;
#if IS_ENABLED(CONFIG_IPV6)
		case 6:
			ret = gue_err_proto_handler(IPPROTO_IPV6, skb, info);
			goto out;
#endif
		default:
			ret = -EOPNOTSUPP;
			goto out;
		}
	}
	default: /* Undefined version */
		return -EOPNOTSUPP;
	}

	if (guehdr->control)
		return -ENOENT;

	optlen = guehdr->hlen << 2;

	if (!pskb_may_pull(skb, transport_offset + len + optlen))
		return -EINVAL;

	guehdr = (struct guehdr *)&udp_hdr(skb)[1];
	if (validate_gue_flags(guehdr, optlen))
		return -EINVAL;

	/* Handling exceptions for direct UDP encapsulation in GUE would lead
	 * to recursion. Besides, this kind of encapsulation can't even be
	 * configured currently. Discard this.
	 */
	if (guehdr->proto_ctype == IPPROTO_UDP ||
	    guehdr->proto_ctype == IPPROTO_UDPLITE)
		return -EOPNOTSUPP;

	skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
	ret = gue_err_proto_handler(guehdr->proto_ctype, skb, info);

out:
	skb_set_transport_header(skb, transport_offset);
	return ret;
}

static const struct ip_tunnel_encap_ops fou_iptun_ops = {
	.encap_hlen = fou_encap_hlen,
	.build_header = fou_build_header,
	.err_handler = gue_err,
};

static const struct ip_tunnel_encap_ops gue_iptun_ops = {
	.encap_hlen = gue_encap_hlen,
	.build_header = gue_build_header,
	.err_handler = gue_err,
};

static int ip_tunnel_encap_add_fou_ops(void)
{
	int ret;

	ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	if (ret < 0) {
		pr_err("can't add fou ops\n");
		return ret;
	}

	ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
	if (ret < 0) {
		pr_err("can't add gue ops\n");
		ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
		return ret;
	}

	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
	ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU);
	ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE);
}

#else

static int ip_tunnel_encap_add_fou_ops(void)
{
	return 0;
}

static void ip_tunnel_encap_del_fou_ops(void)
{
}

#endif

static __net_init int fou_init_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);

	INIT_LIST_HEAD(&fn->fou_list);
	mutex_init(&fn->fou_lock);
	return 0;
}

static __net_exit void fou_exit_net(struct net *net)
{
	struct fou_net *fn = net_generic(net, fou_net_id);
	struct fou *fou, *next;

	/* Close all the FOU sockets */
	mutex_lock(&fn->fou_lock);
	list_for_each_entry_safe(fou, next, &fn->fou_list, list)
		fou_release(fou);
	mutex_unlock(&fn->fou_lock);
}

static struct pernet_operations fou_net_ops = {
	.init = fou_init_net,
	.exit = fou_exit_net,
	.id   = &fou_net_id,
	.size = sizeof(struct fou_net),
};

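/* Module init registers in dependency order: per-net state first (the
 * netlink handlers look it up via net_generic()), then the generic netlink
 * family, then the tunnel encap ops; errors unwind in reverse.
 */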
static int __init fou_init(void)
{
	int ret;

	ret = register_pernet_device(&fou_net_ops);
	if (ret)
		goto exit;

	ret = genl_register_family(&fou_nl_family);
	if (ret < 0)
		goto unregister;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
exit:
	return ret;
}

static void __exit fou_fini(void)
{
	ip_tunnel_encap_del_fou_ops();
	genl_unregister_family(&fou_nl_family);
	unregister_pernet_device(&fou_net_ops);
}

module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Foo over UDP");