nf_flow_table_offload.c

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/tc_act/tc_csum.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static struct workqueue_struct *nf_flow_offload_wq;

struct flow_offload_work {
        struct list_head        list;
        enum flow_cls_command   cmd;
        int                     priority;
        struct nf_flowtable     *flowtable;
        struct flow_offload     *flow;
        struct work_struct      work;
};

#define NF_FLOW_DISSECTOR(__match, __type, __field)     \
        (__match)->dissector.offset[__type] =           \
                offsetof(struct nf_flow_key, __field)

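/* Populate the ENC_* (outer tunnel) match keys when the route carries TX
 * tunnel metadata. Note that the tunnel key's dst/src are swapped into the
 * match src/dst: the rule matches packets as they arrive from the tunnel.
 */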
static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
                                   struct ip_tunnel_info *tun_info)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;
        unsigned int enc_keys;

        if (!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))
                return;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
        key->enc_key_id.keyid = tunnel_id_to_key32(tun_info->key.tun_id);
        mask->enc_key_id.keyid = 0xffffffff;
        enc_keys = BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
                   BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL);

        if (ip_tunnel_info_af(tun_info) == AF_INET) {
                NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                  enc_ipv4);
                key->enc_ipv4.src = tun_info->key.u.ipv4.dst;
                key->enc_ipv4.dst = tun_info->key.u.ipv4.src;
                if (key->enc_ipv4.src)
                        mask->enc_ipv4.src = 0xffffffff;
                if (key->enc_ipv4.dst)
                        mask->enc_ipv4.dst = 0xffffffff;
                enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        } else {
                memcpy(&key->enc_ipv6.src, &tun_info->key.u.ipv6.dst,
                       sizeof(struct in6_addr));
                memcpy(&key->enc_ipv6.dst, &tun_info->key.u.ipv6.src,
                       sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.src, &in6addr_any,
                           sizeof(struct in6_addr)))
                        memset(&mask->enc_ipv6.src, 0xff,
                               sizeof(struct in6_addr));
                if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
                           sizeof(struct in6_addr)))
                        memset(&mask->enc_ipv6.dst, 0xff,
                               sizeof(struct in6_addr));
                enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
                key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
        }

        match->dissector.used_keys |= enc_keys;
}

static int nf_flow_rule_match(struct nf_flow_match *match,
                              const struct flow_offload_tuple *tuple,
                              struct dst_entry *other_dst)
{
        struct nf_flow_key *mask = &match->mask;
        struct nf_flow_key *key = &match->key;
        struct ip_tunnel_info *tun_info;

        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
        NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);

        if (other_dst && other_dst->lwtstate) {
                tun_info = lwt_tun_info(other_dst->lwtstate);
                nf_flow_rule_lwt_match(match, tun_info);
        }

        key->meta.ingress_ifindex = tuple->iifidx;
        mask->meta.ingress_ifindex = 0xffffffff;

        switch (tuple->l3proto) {
        case AF_INET:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                key->basic.n_proto = htons(ETH_P_IP);
                key->ipv4.src = tuple->src_v4.s_addr;
                mask->ipv4.src = 0xffffffff;
                key->ipv4.dst = tuple->dst_v4.s_addr;
                mask->ipv4.dst = 0xffffffff;
                break;
        case AF_INET6:
                key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                key->basic.n_proto = htons(ETH_P_IPV6);
                key->ipv6.src = tuple->src_v6;
                memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
                key->ipv6.dst = tuple->dst_v6;
                memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
                break;
        default:
                return -EOPNOTSUPP;
        }
        mask->control.addr_type = 0xffff;
        match->dissector.used_keys |= BIT(key->control.addr_type);
        mask->basic.n_proto = 0xffff;

        switch (tuple->l4proto) {
        case IPPROTO_TCP:
                key->tcp.flags = 0;
                mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
                match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
                break;
        case IPPROTO_UDP:
                break;
        default:
                return -EOPNOTSUPP;
        }

        key->basic.ip_proto = tuple->l4proto;
        mask->basic.ip_proto = 0xff;

        key->tp.src = tuple->src_port;
        mask->tp.src = 0xffff;
        key->tp.dst = tuple->dst_port;
        mask->tp.dst = 0xffff;

        match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
                                      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
                                      BIT(FLOW_DISSECTOR_KEY_BASIC) |
                                      BIT(FLOW_DISSECTOR_KEY_PORTS);
        return 0;
}

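/* All header rewrites below are emitted as FLOW_ACTION_MANGLE entries:
 * a 32-bit value/mask pair applied at a fixed offset within the header
 * selected by @htype (Ethernet, IPv4, IPv6, TCP or UDP).
 */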
static void flow_offload_mangle(struct flow_action_entry *entry,
                                enum flow_action_mangle_base htype, u32 offset,
                                const __be32 *value, const __be32 *mask)
{
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
        entry->mangle.offset = offset;
        memcpy(&entry->mangle.mask, mask, sizeof(u32));
        memcpy(&entry->mangle.val, value, sizeof(u32));
}

static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
        int i = flow_rule->rule->action.num_entries++;

        return &flow_rule->rule->action.entries[i];
}

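/* Rewrite the Ethernet source address to the MAC of the egress device,
 * i.e. the input device of the opposite direction. The 6-byte address is
 * split across the 32-bit words at offsets 4 and 8 of the Ethernet header.
 */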
static int flow_offload_eth_src(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        struct net_device *dev;
        u32 mask, val;
        u16 val16;

        dev = dev_get_by_index(net, tuple->iifidx);
        if (!dev)
                return -ENOENT;

        mask = ~0xffff0000;
        memcpy(&val16, dev->dev_addr, 2);
        val = val16 << 16;
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);

        mask = ~0xffffffff;
        memcpy(&val, dev->dev_addr + 2, 4);
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
                            &val, &mask);

        dev_put(dev);

        return 0;
}

static int flow_offload_eth_dst(struct net *net,
                                const struct flow_offload *flow,
                                enum flow_offload_tuple_dir dir,
                                struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
        struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
        const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
        const struct dst_entry *dst_cache;
        unsigned char ha[ETH_ALEN];
        struct neighbour *n;
        u32 mask, val;
        u8 nud_state;
        u16 val16;

        dst_cache = flow->tuplehash[dir].tuple.dst_cache;
        n = dst_neigh_lookup(dst_cache, daddr);
        if (!n)
                return -ENOENT;

        read_lock_bh(&n->lock);
        nud_state = n->nud_state;
        ether_addr_copy(ha, n->ha);
        read_unlock_bh(&n->lock);

        if (!(nud_state & NUD_VALID)) {
                neigh_release(n);
                return -ENOENT;
        }

        mask = ~0xffffffff;
        memcpy(&val, ha, 4);
        flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
                            &val, &mask);

        mask = ~0x0000ffff;
        memcpy(&val16, ha + 4, 2);
        val = val16;
        flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
                            &val, &mask);

        neigh_release(n);

        return 0;
}

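/* NAT address rewrites: the replacement address comes from the reverse
 * tuple, and the offset selects iphdr->saddr or iphdr->daddr depending on
 * whether this is the original or the reply direction.
 */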
static void flow_offload_ipv4_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            &addr, &mask);
}

static void flow_offload_ipv4_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask = ~htonl(0xffffffff);
        __be32 addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
                offset = offsetof(struct iphdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
                offset = offsetof(struct iphdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
                            &addr, &mask);
}

static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     unsigned int offset,
                                     const __be32 *addr, const __be32 *mask)
{
        struct flow_action_entry *entry;
        int i, j;

        for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
                                    offset + i, &addr[j], mask);
        }
}

static void flow_offload_ipv6_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const __be32 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static void flow_offload_ipv6_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        u32 mask = ~htonl(0xffffffff);
        const __be32 *addr;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, daddr);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
                offset = offsetof(struct ipv6hdr, saddr);
                break;
        default:
                return;
        }

        flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
}

static int flow_offload_l4proto(const struct flow_offload *flow)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        u8 type = 0;

        switch (protonum) {
        case IPPROTO_TCP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
                break;
        case IPPROTO_UDP:
                type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
                break;
        default:
                break;
        }

        return type;
}

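/* Port rewrites operate on the 32-bit word at the start of the transport
 * header, which holds both the source and destination ports; the shift and
 * mask select which 16-bit half gets replaced.
 */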
static void flow_offload_port_snat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask, port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
                port = htonl(port << 16);
                mask = ~htonl(0xffff0000);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
                port = htonl(port);
                mask = ~htonl(0xffff);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
}

static void flow_offload_port_dnat(struct net *net,
                                   const struct flow_offload *flow,
                                   enum flow_offload_tuple_dir dir,
                                   struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        u32 mask, port;
        u32 offset;

        switch (dir) {
        case FLOW_OFFLOAD_DIR_ORIGINAL:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
                offset = 0; /* offsetof(struct tcphdr, dest); */
                port = htonl(port);
                mask = ~htonl(0xffff);
                break;
        case FLOW_OFFLOAD_DIR_REPLY:
                port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
                offset = 0; /* offsetof(struct tcphdr, source); */
                port = htonl(port << 16);
                mask = ~htonl(0xffff0000);
                break;
        default:
                return;
        }

        flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
                            &port, &mask);
}

static void flow_offload_ipv4_checksum(struct net *net,
                                       const struct flow_offload *flow,
                                       struct nf_flow_rule *flow_rule)
{
        u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

        entry->id = FLOW_ACTION_CSUM;
        entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

        switch (protonum) {
        case IPPROTO_TCP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
                break;
        case IPPROTO_UDP:
                entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
                break;
        }
}

static void flow_offload_redirect(const struct flow_offload *flow,
                                  enum flow_offload_tuple_dir dir,
                                  struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
        struct rtable *rt;

        rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
        entry->id = FLOW_ACTION_REDIRECT;
        entry->dev = rt->dst.dev;
        dev_hold(rt->dst.dev);
}

static void flow_offload_encap_tunnel(const struct flow_offload *flow,
                                      enum flow_offload_tuple_dir dir,
                                      struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry;
        struct dst_entry *dst;

        dst = flow->tuplehash[dir].tuple.dst_cache;
        if (dst && dst->lwtstate) {
                struct ip_tunnel_info *tun_info;

                tun_info = lwt_tun_info(dst->lwtstate);
                if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_TUNNEL_ENCAP;
                        entry->tunnel = tun_info;
                }
        }
}

static void flow_offload_decap_tunnel(const struct flow_offload *flow,
                                      enum flow_offload_tuple_dir dir,
                                      struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry;
        struct dst_entry *dst;

        dst = flow->tuplehash[!dir].tuple.dst_cache;
        if (dst && dst->lwtstate) {
                struct ip_tunnel_info *tun_info;

                tun_info = lwt_tun_info(dst->lwtstate);
                if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
                        entry = flow_action_entry_next(flow_rule);
                        entry->id = FLOW_ACTION_TUNNEL_DECAP;
                }
        }
}

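/* Build the complete action list for one direction of an IPv4 flow:
 * tunnel decap/encap, Ethernet header rewrite, optional SNAT/DNAT of
 * addresses and ports, checksum fixup, and finally a redirect to the
 * output device.
 */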
int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        flow_offload_decap_tunnel(flow, dir, flow_rule);
        flow_offload_encap_tunnel(flow, dir, flow_rule);

        if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
            flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
                return -1;

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                flow_offload_ipv4_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
            test_bit(NF_FLOW_DNAT, &flow->flags))
                flow_offload_ipv4_checksum(net, flow, flow_rule);

        flow_offload_redirect(flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);

int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
                            enum flow_offload_tuple_dir dir,
                            struct nf_flow_rule *flow_rule)
{
        flow_offload_decap_tunnel(flow, dir, flow_rule);
        flow_offload_encap_tunnel(flow, dir, flow_rule);

        if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
            flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
                return -1;

        if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
                flow_offload_ipv6_snat(net, flow, dir, flow_rule);
                flow_offload_port_snat(net, flow, dir, flow_rule);
        }
        if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
                flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
                flow_offload_port_dnat(net, flow, dir, flow_rule);
        }

        flow_offload_redirect(flow, dir, flow_rule);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);

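/* Upper bound on the number of action entries a single offloaded rule may
 * carry; flow_rule_alloc() sizes the action array accordingly.
 */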
#define NF_FLOW_RULE_ACTION_MAX 16

static struct nf_flow_rule *
nf_flow_offload_rule_alloc(struct net *net,
                           const struct flow_offload_work *offload,
                           enum flow_offload_tuple_dir dir)
{
        const struct nf_flowtable *flowtable = offload->flowtable;
        const struct flow_offload *flow = offload->flow;
        const struct flow_offload_tuple *tuple;
        struct nf_flow_rule *flow_rule;
        struct dst_entry *other_dst;
        int err = -ENOMEM;

        flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
        if (!flow_rule)
                goto err_flow;

        flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
        if (!flow_rule->rule)
                goto err_flow_rule;

        flow_rule->rule->match.dissector = &flow_rule->match.dissector;
        flow_rule->rule->match.mask = &flow_rule->match.mask;
        flow_rule->rule->match.key = &flow_rule->match.key;

        tuple = &flow->tuplehash[dir].tuple;
        other_dst = flow->tuplehash[!dir].tuple.dst_cache;
        err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
        if (err < 0)
                goto err_flow_match;

        flow_rule->rule->action.num_entries = 0;
        if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
                goto err_flow_match;

        return flow_rule;

err_flow_match:
        kfree(flow_rule->rule);
err_flow_rule:
        kfree(flow_rule);
err_flow:
        return NULL;
}

static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
{
        struct flow_action_entry *entry;
        int i;

        for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
                entry = &flow_rule->rule->action.entries[i];
                if (entry->id != FLOW_ACTION_REDIRECT)
                        continue;

                dev_put(entry->dev);
        }
        kfree(flow_rule->rule);
        kfree(flow_rule);
}

static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
{
        int i;

        for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
                __nf_flow_offload_destroy(flow_rule[i]);
}

static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        struct net *net = read_pnet(&offload->flowtable->net);

        flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_ORIGINAL);
        if (!flow_rule[0])
                return -ENOMEM;

        flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
                                                  FLOW_OFFLOAD_DIR_REPLY);
        if (!flow_rule[1]) {
                __nf_flow_offload_destroy(flow_rule[0]);
                return -ENOMEM;
        }

        return 0;
}

static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
                                 __be16 proto, int priority,
                                 enum flow_cls_command cmd,
                                 const struct flow_offload_tuple *tuple,
                                 struct netlink_ext_ack *extack)
{
        cls_flow->common.protocol = proto;
        cls_flow->common.prio = priority;
        cls_flow->common.extack = extack;
        cls_flow->command = cmd;
        cls_flow->cookie = (unsigned long)tuple;
}

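/* Hand one direction of the flow to all registered block callbacks and
 * return the number of callbacks that accepted the command. For
 * FLOW_CLS_STATS, the statistics reported by the callbacks are copied
 * back into @stats.
 */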
static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
                                 struct flow_offload *flow,
                                 struct nf_flow_rule *flow_rule,
                                 enum flow_offload_tuple_dir dir,
                                 int priority, int cmd,
                                 struct flow_stats *stats,
                                 struct list_head *block_cb_list)
{
        struct flow_cls_offload cls_flow = {};
        struct flow_block_cb *block_cb;
        struct netlink_ext_ack extack;
        __be16 proto = ETH_P_ALL;
        int err, i = 0;

        nf_flow_offload_init(&cls_flow, proto, priority, cmd,
                             &flow->tuplehash[dir].tuple, &extack);
        if (cmd == FLOW_CLS_REPLACE)
                cls_flow.rule = flow_rule->rule;

        down_read(&flowtable->flow_block_lock);
        list_for_each_entry(block_cb, block_cb_list, list) {
                err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
                                   block_cb->cb_priv);
                if (err < 0)
                        continue;

                i++;
        }
        up_read(&flowtable->flow_block_lock);

        if (cmd == FLOW_CLS_STATS)
                memcpy(stats, &cls_flow.stats, sizeof(*stats));

        return i;
}

static int flow_offload_tuple_add(struct flow_offload_work *offload,
                                  struct nf_flow_rule *flow_rule,
                                  enum flow_offload_tuple_dir dir)
{
        return nf_flow_offload_tuple(offload->flowtable, offload->flow,
                                     flow_rule, dir, offload->priority,
                                     FLOW_CLS_REPLACE, NULL,
                                     &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_tuple_del(struct flow_offload_work *offload,
                                   enum flow_offload_tuple_dir dir)
{
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
                              offload->priority, FLOW_CLS_DESTROY, NULL,
                              &offload->flowtable->flow_block.cb_list);
}

static int flow_offload_rule_add(struct flow_offload_work *offload,
                                 struct nf_flow_rule *flow_rule[])
{
        int ok_count = 0;

        ok_count += flow_offload_tuple_add(offload, flow_rule[0],
                                           FLOW_OFFLOAD_DIR_ORIGINAL);
        ok_count += flow_offload_tuple_add(offload, flow_rule[1],
                                           FLOW_OFFLOAD_DIR_REPLY);
        if (ok_count == 0)
                return -ENOENT;

        return 0;
}

static void flow_offload_work_add(struct flow_offload_work *offload)
{
        struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
        int err;

        err = nf_flow_offload_alloc(offload, flow_rule);
        if (err < 0)
                return;

        err = flow_offload_rule_add(offload, flow_rule);
        if (err < 0)
                goto out;

        set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);

out:
        nf_flow_offload_destroy(flow_rule);
}

static void flow_offload_work_del(struct flow_offload_work *offload)
{
        clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
        flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
        set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}

static void flow_offload_tuple_stats(struct flow_offload_work *offload,
                                     enum flow_offload_tuple_dir dir,
                                     struct flow_stats *stats)
{
        nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
                              offload->priority, FLOW_CLS_STATS, stats,
                              &offload->flowtable->flow_block.cb_list);
}

static void flow_offload_work_stats(struct flow_offload_work *offload)
{
        struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
        u64 lastused;

        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
        flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);

        lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
        offload->flow->timeout = max_t(u64, offload->flow->timeout,
                                       lastused + NF_FLOW_TIMEOUT);

        if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
                if (stats[0].pkts)
                        nf_ct_acct_add(offload->flow->ct,
                                       FLOW_OFFLOAD_DIR_ORIGINAL,
                                       stats[0].pkts, stats[0].bytes);
                if (stats[1].pkts)
                        nf_ct_acct_add(offload->flow->ct,
                                       FLOW_OFFLOAD_DIR_REPLY,
                                       stats[1].pkts, stats[1].bytes);
        }
}

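/* Work item dispatcher: each queued flow_offload_work carries one of the
 * FLOW_CLS_REPLACE/DESTROY/STATS commands prepared by the helpers below.
 */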
static void flow_offload_work_handler(struct work_struct *work)
{
        struct flow_offload_work *offload;

        offload = container_of(work, struct flow_offload_work, work);
        switch (offload->cmd) {
        case FLOW_CLS_REPLACE:
                flow_offload_work_add(offload);
                break;
        case FLOW_CLS_DESTROY:
                flow_offload_work_del(offload);
                break;
        case FLOW_CLS_STATS:
                flow_offload_work_stats(offload);
                break;
        default:
                WARN_ON_ONCE(1);
        }

        clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
        kfree(offload);
}

static void flow_offload_queue_work(struct flow_offload_work *offload)
{
        queue_work(nf_flow_offload_wq, &offload->work);
}

static struct flow_offload_work *
nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
                           struct flow_offload *flow, unsigned int cmd)
{
        struct flow_offload_work *offload;

        if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
                return NULL;

        offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
        if (!offload) {
                clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
                return NULL;
        }

        offload->cmd = cmd;
        offload->flow = flow;
        offload->priority = flowtable->priority;
        offload->flowtable = flowtable;
        INIT_WORK(&offload->work, flow_offload_work_handler);

        return offload;
}

void nf_flow_offload_add(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
        if (!offload)
                return;

        flow_offload_queue_work(offload);
}

void nf_flow_offload_del(struct nf_flowtable *flowtable,
                         struct flow_offload *flow)
{
        struct flow_offload_work *offload;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
        if (!offload)
                return;

        set_bit(NF_FLOW_HW_DYING, &flow->flags);
        flow_offload_queue_work(offload);
}

void nf_flow_offload_stats(struct nf_flowtable *flowtable,
                           struct flow_offload *flow)
{
        struct flow_offload_work *offload;
        __s32 delta;

        delta = nf_flow_timeout_delta(flow->timeout);
        if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
                return;

        offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
        if (!offload)
                return;

        flow_offload_queue_work(offload);
}

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
{
        if (nf_flowtable_hw_offload(flowtable))
                flush_workqueue(nf_flow_offload_wq);
}

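/* Bind or unbind the flowtable from the driver's flow block: on BIND the
 * driver callbacks are spliced onto the flowtable's callback list, on
 * UNBIND they are removed and freed.
 */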
static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
                                     struct flow_block_offload *bo,
                                     enum flow_block_command cmd)
{
        struct flow_block_cb *block_cb, *next;
        int err = 0;

        switch (cmd) {
        case FLOW_BLOCK_BIND:
                list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
                break;
        case FLOW_BLOCK_UNBIND:
                list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                        list_del(&block_cb->list);
                        flow_block_cb_free(block_cb);
                }
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}

static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
                                             struct net *net,
                                             enum flow_block_command cmd,
                                             struct nf_flowtable *flowtable,
                                             struct netlink_ext_ack *extack)
{
        memset(bo, 0, sizeof(*bo));
        bo->net = net;
        bo->block = &flowtable->flow_block;
        bo->command = cmd;
        bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
        bo->extack = extack;
        bo->cb_list_head = &flowtable->flow_block.cb_list;
        INIT_LIST_HEAD(&bo->cb_list);
}

static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
{
        struct nf_flowtable *flowtable = block_cb->indr.data;
        struct net_device *dev = block_cb->indr.dev;

        nf_flow_table_gc_cleanup(flowtable, dev);
        down_write(&flowtable->flow_block_lock);
        list_del(&block_cb->list);
        list_del(&block_cb->driver_list);
        flow_block_cb_free(block_cb);
        up_write(&flowtable->flow_block_lock);
}

static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
                                          struct nf_flowtable *flowtable,
                                          struct net_device *dev,
                                          enum flow_block_command cmd,
                                          struct netlink_ext_ack *extack)
{
        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);

        return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
                                           nf_flow_table_indr_cleanup);
}

static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
                                     struct nf_flowtable *flowtable,
                                     struct net_device *dev,
                                     enum flow_block_command cmd,
                                     struct netlink_ext_ack *extack)
{
        int err;

        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);
        err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
        if (err < 0)
                return err;

        return 0;
}

int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
                                struct net_device *dev,
                                enum flow_block_command cmd)
{
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo;
        int err;

        if (!nf_flowtable_hw_offload(flowtable))
                return 0;

        if (dev->netdev_ops->ndo_setup_tc)
                err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
                                                &extack);
        else
                err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
                                                     &extack);
        if (err < 0)
                return err;

        return nf_flow_table_block_setup(flowtable, &bo, cmd);
}
EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);

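/* Offload requests are serialized through a dedicated unbound workqueue,
 * created on module init and destroyed on exit.
 */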
int nf_flow_table_offload_init(void)
{
        nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
                                             WQ_UNBOUND, 0);
        if (!nf_flow_offload_wq)
                return -ENOMEM;

        return 0;
}

void nf_flow_table_offload_exit(void)
{
        destroy_workqueue(nf_flow_offload_wq);
}