nexthop.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Generic nexthop implementation
  3. *
  4. * Copyright (c) 2017-19 Cumulus Networks
  5. * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
  6. */
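/* For illustration only (assumes a reasonably recent iproute2): the
 * RTM_{NEW,DEL,GET}NEXTHOP handlers below are what back commands such as
 *
 *   ip nexthop add id 1 via 192.0.2.1 dev eth0
 *   ip nexthop add id 2 via 2001:db8::1 dev eth0
 *   ip nexthop add id 3 group 1/2
 *   ip route add 198.51.100.0/24 nhid 3
 *   ip nexthop del id 1
 *   ip nexthop list
 */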
  7. #include <linux/nexthop.h>
  8. #include <linux/rtnetlink.h>
  9. #include <linux/slab.h>
  10. #include <net/arp.h>
  11. #include <net/ipv6_stubs.h>
  12. #include <net/lwtunnel.h>
  13. #include <net/ndisc.h>
  14. #include <net/nexthop.h>
  15. #include <net/route.h>
  16. #include <net/sock.h>
  17. static void remove_nexthop(struct net *net, struct nexthop *nh,
  18. struct nl_info *nlinfo);
  19. #define NH_DEV_HASHBITS 8
  20. #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
  21. static const struct nla_policy rtm_nh_policy[NHA_MAX + 1] = {
  22. [NHA_ID] = { .type = NLA_U32 },
  23. [NHA_GROUP] = { .type = NLA_BINARY },
  24. [NHA_GROUP_TYPE] = { .type = NLA_U16 },
  25. [NHA_BLACKHOLE] = { .type = NLA_FLAG },
  26. [NHA_OIF] = { .type = NLA_U32 },
  27. [NHA_GATEWAY] = { .type = NLA_BINARY },
  28. [NHA_ENCAP_TYPE] = { .type = NLA_U16 },
  29. [NHA_ENCAP] = { .type = NLA_NESTED },
  30. [NHA_GROUPS] = { .type = NLA_FLAG },
  31. [NHA_MASTER] = { .type = NLA_U32 },
  32. [NHA_FDB] = { .type = NLA_FLAG },
  33. };
  34. static int call_nexthop_notifiers(struct net *net,
  35. enum nexthop_event_type event_type,
  36. struct nexthop *nh)
  37. {
  38. int err;
  39. err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
  40. event_type, nh);
  41. return notifier_to_errno(err);
  42. }
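/* Nexthops that resolve to a device are also kept in a small hash table
 * keyed by ifindex (NH_DEV_HASHSIZE buckets) so device events
 * (nexthop_flush_dev(), nexthop_sync_mtu() below) can find them without
 * walking the whole id tree.
 */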
  43. static unsigned int nh_dev_hashfn(unsigned int val)
  44. {
  45. unsigned int mask = NH_DEV_HASHSIZE - 1;
  46. return (val ^
  47. (val >> NH_DEV_HASHBITS) ^
  48. (val >> (NH_DEV_HASHBITS * 2))) & mask;
  49. }
  50. static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
  51. {
  52. struct net_device *dev = nhi->fib_nhc.nhc_dev;
  53. struct hlist_head *head;
  54. unsigned int hash;
  55. WARN_ON(!dev);
  56. hash = nh_dev_hashfn(dev->ifindex);
  57. head = &net->nexthop.devhash[hash];
  58. hlist_add_head(&nhi->dev_hash, head);
  59. }
  60. static void nexthop_free_mpath(struct nexthop *nh)
  61. {
  62. struct nh_group *nhg;
  63. int i;
  64. nhg = rcu_dereference_raw(nh->nh_grp);
  65. for (i = 0; i < nhg->num_nh; ++i) {
  66. struct nh_grp_entry *nhge = &nhg->nh_entries[i];
  67. WARN_ON(!list_empty(&nhge->nh_list));
  68. nexthop_put(nhge->nh);
  69. }
  70. WARN_ON(nhg->spare == nhg);
  71. kfree(nhg->spare);
  72. kfree(nhg);
  73. }
  74. static void nexthop_free_single(struct nexthop *nh)
  75. {
  76. struct nh_info *nhi;
  77. nhi = rcu_dereference_raw(nh->nh_info);
  78. switch (nhi->family) {
  79. case AF_INET:
  80. fib_nh_release(nh->net, &nhi->fib_nh);
  81. break;
  82. case AF_INET6:
  83. ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
  84. break;
  85. }
  86. kfree(nhi);
  87. }
  88. void nexthop_free_rcu(struct rcu_head *head)
  89. {
  90. struct nexthop *nh = container_of(head, struct nexthop, rcu);
  91. if (nh->is_group)
  92. nexthop_free_mpath(nh);
  93. else
  94. nexthop_free_single(nh);
  95. kfree(nh);
  96. }
  97. EXPORT_SYMBOL_GPL(nexthop_free_rcu);
  98. static struct nexthop *nexthop_alloc(void)
  99. {
  100. struct nexthop *nh;
  101. nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
  102. if (nh) {
  103. INIT_LIST_HEAD(&nh->fi_list);
  104. INIT_LIST_HEAD(&nh->f6i_list);
  105. INIT_LIST_HEAD(&nh->grp_list);
  106. INIT_LIST_HEAD(&nh->fdb_list);
  107. }
  108. return nh;
  109. }
  110. static struct nh_group *nexthop_grp_alloc(u16 num_nh)
  111. {
  112. struct nh_group *nhg;
  113. nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
  114. if (nhg)
  115. nhg->num_nh = num_nh;
  116. return nhg;
  117. }
  118. static void nh_base_seq_inc(struct net *net)
  119. {
  120. while (++net->nexthop.seq == 0)
  121. ;
  122. }
  123. /* no reference taken; rcu lock or rtnl must be held */
  124. struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
  125. {
  126. struct rb_node **pp, *parent = NULL, *next;
  127. pp = &net->nexthop.rb_root.rb_node;
  128. while (1) {
  129. struct nexthop *nh;
  130. next = rcu_dereference_raw(*pp);
  131. if (!next)
  132. break;
  133. parent = next;
  134. nh = rb_entry(parent, struct nexthop, rb_node);
  135. if (id < nh->id)
  136. pp = &next->rb_left;
  137. else if (id > nh->id)
  138. pp = &next->rb_right;
  139. else
  140. return nh;
  141. }
  142. return NULL;
  143. }
  144. EXPORT_SYMBOL_GPL(nexthop_find_by_id);
  145. /* used for auto id allocation; called with rtnl held */
  146. static u32 nh_find_unused_id(struct net *net)
  147. {
  148. u32 id_start = net->nexthop.last_id_allocated;
  149. while (1) {
  150. net->nexthop.last_id_allocated++;
  151. if (net->nexthop.last_id_allocated == id_start)
  152. break;
  153. if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
  154. return net->nexthop.last_id_allocated;
  155. }
  156. return 0;
  157. }
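/* Note on weights: userspace passes a 0-based weight (0..254, validated
 * in nh_check_attr_group()), it is stored internally as weight + 1, and
 * dumps convert back with weight - 1 below.
 */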
  158. static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
  159. {
  160. struct nexthop_grp *p;
  161. size_t len = nhg->num_nh * sizeof(*p);
  162. struct nlattr *nla;
  163. u16 group_type = 0;
  164. int i;
  165. if (nhg->mpath)
  166. group_type = NEXTHOP_GRP_TYPE_MPATH;
  167. if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
  168. goto nla_put_failure;
  169. nla = nla_reserve(skb, NHA_GROUP, len);
  170. if (!nla)
  171. goto nla_put_failure;
  172. p = nla_data(nla);
  173. for (i = 0; i < nhg->num_nh; ++i) {
  174. p->id = nhg->nh_entries[i].nh->id;
  175. p->weight = nhg->nh_entries[i].weight - 1;
  176. p += 1;
  177. }
  178. return 0;
  179. nla_put_failure:
  180. return -EMSGSIZE;
  181. }
  182. static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
  183. int event, u32 portid, u32 seq, unsigned int nlflags)
  184. {
  185. struct fib6_nh *fib6_nh;
  186. struct fib_nh *fib_nh;
  187. struct nlmsghdr *nlh;
  188. struct nh_info *nhi;
  189. struct nhmsg *nhm;
  190. nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
  191. if (!nlh)
  192. return -EMSGSIZE;
  193. nhm = nlmsg_data(nlh);
  194. nhm->nh_family = AF_UNSPEC;
  195. nhm->nh_flags = nh->nh_flags;
  196. nhm->nh_protocol = nh->protocol;
  197. nhm->nh_scope = 0;
  198. nhm->resvd = 0;
  199. if (nla_put_u32(skb, NHA_ID, nh->id))
  200. goto nla_put_failure;
  201. if (nh->is_group) {
  202. struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
  203. if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
  204. goto nla_put_failure;
  205. if (nla_put_nh_group(skb, nhg))
  206. goto nla_put_failure;
  207. goto out;
  208. }
  209. nhi = rtnl_dereference(nh->nh_info);
  210. nhm->nh_family = nhi->family;
  211. if (nhi->reject_nh) {
  212. if (nla_put_flag(skb, NHA_BLACKHOLE))
  213. goto nla_put_failure;
  214. goto out;
  215. } else if (nhi->fdb_nh) {
  216. if (nla_put_flag(skb, NHA_FDB))
  217. goto nla_put_failure;
  218. } else {
  219. const struct net_device *dev;
  220. dev = nhi->fib_nhc.nhc_dev;
  221. if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
  222. goto nla_put_failure;
  223. }
  224. nhm->nh_scope = nhi->fib_nhc.nhc_scope;
  225. switch (nhi->family) {
  226. case AF_INET:
  227. fib_nh = &nhi->fib_nh;
  228. if (fib_nh->fib_nh_gw_family &&
  229. nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
  230. goto nla_put_failure;
  231. break;
  232. case AF_INET6:
  233. fib6_nh = &nhi->fib6_nh;
  234. if (fib6_nh->fib_nh_gw_family &&
  235. nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
  236. goto nla_put_failure;
  237. break;
  238. }
  239. if (nhi->fib_nhc.nhc_lwtstate &&
  240. lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
  241. NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
  242. goto nla_put_failure;
  243. out:
  244. nlmsg_end(skb, nlh);
  245. return 0;
  246. nla_put_failure:
  247. nlmsg_cancel(skb, nlh);
  248. return -EMSGSIZE;
  249. }
  250. static size_t nh_nlmsg_size_grp(struct nexthop *nh)
  251. {
  252. struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
  253. size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
  254. return nla_total_size(sz) +
  255. nla_total_size(2); /* NHA_GROUP_TYPE */
  256. }
  257. static size_t nh_nlmsg_size_single(struct nexthop *nh)
  258. {
  259. struct nh_info *nhi = rtnl_dereference(nh->nh_info);
  260. size_t sz;
  261. /* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
  262. * are mutually exclusive
  263. */
  264. sz = nla_total_size(4); /* NHA_OIF */
  265. switch (nhi->family) {
  266. case AF_INET:
  267. if (nhi->fib_nh.fib_nh_gw_family)
  268. sz += nla_total_size(4); /* NHA_GATEWAY */
  269. break;
  270. case AF_INET6:
  271. /* NHA_GATEWAY */
  272. if (nhi->fib6_nh.fib_nh_gw_family)
  273. sz += nla_total_size(sizeof(const struct in6_addr));
  274. break;
  275. }
  276. if (nhi->fib_nhc.nhc_lwtstate) {
  277. sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
  278. sz += nla_total_size(2); /* NHA_ENCAP_TYPE */
  279. }
  280. return sz;
  281. }
  282. static size_t nh_nlmsg_size(struct nexthop *nh)
  283. {
  284. size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
  285. sz += nla_total_size(4); /* NHA_ID */
  286. if (nh->is_group)
  287. sz += nh_nlmsg_size_grp(nh);
  288. else
  289. sz += nh_nlmsg_size_single(nh);
  290. return sz;
  291. }
  292. static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
  293. {
  294. unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
  295. u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
  296. struct sk_buff *skb;
  297. int err = -ENOBUFS;
  298. skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
  299. if (!skb)
  300. goto errout;
  301. err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
  302. if (err < 0) {
  303. /* -EMSGSIZE implies BUG in nh_nlmsg_size() */
  304. WARN_ON(err == -EMSGSIZE);
  305. kfree_skb(skb);
  306. goto errout;
  307. }
  308. rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
  309. info->nlh, gfp_any());
  310. return;
  311. errout:
  312. if (err < 0)
  313. rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
  314. }
  315. static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
  316. bool *is_fdb, struct netlink_ext_ack *extack)
  317. {
  318. if (nh->is_group) {
  319. struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
  320. /* nested multipath (group within a group) is not
  321. * supported
  322. */
  323. if (nhg->mpath) {
  324. NL_SET_ERR_MSG(extack,
  325. "Multipath group can not be a nexthop within a group");
  326. return false;
  327. }
  328. *is_fdb = nhg->fdb_nh;
  329. } else {
  330. struct nh_info *nhi = rtnl_dereference(nh->nh_info);
  331. if (nhi->reject_nh && npaths > 1) {
  332. NL_SET_ERR_MSG(extack,
  333. "Blackhole nexthop can not be used in a group with more than 1 path");
  334. return false;
  335. }
  336. *is_fdb = nhi->fdb_nh;
  337. }
  338. return true;
  339. }
  340. static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
  341. struct netlink_ext_ack *extack)
  342. {
  343. struct nh_info *nhi;
  344. nhi = rtnl_dereference(nh->nh_info);
  345. if (!nhi->fdb_nh) {
  346. NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
  347. return -EINVAL;
  348. }
  349. if (*nh_family == AF_UNSPEC) {
  350. *nh_family = nhi->family;
  351. } else if (*nh_family != nhi->family) {
  352. NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
  353. return -EINVAL;
  354. }
  355. return 0;
  356. }
  357. static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
  358. struct netlink_ext_ack *extack)
  359. {
  360. unsigned int len = nla_len(tb[NHA_GROUP]);
  361. u8 nh_family = AF_UNSPEC;
  362. struct nexthop_grp *nhg;
  363. unsigned int i, j;
  364. u8 nhg_fdb = 0;
  365. if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
  366. NL_SET_ERR_MSG(extack,
  367. "Invalid length for nexthop group attribute");
  368. return -EINVAL;
  369. }
  370. /* convert len to number of nexthop ids */
  371. len /= sizeof(*nhg);
  372. nhg = nla_data(tb[NHA_GROUP]);
  373. for (i = 0; i < len; ++i) {
  374. if (nhg[i].resvd1 || nhg[i].resvd2) {
  375. NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
  376. return -EINVAL;
  377. }
  378. if (nhg[i].weight > 254) {
  379. NL_SET_ERR_MSG(extack, "Invalid value for weight");
  380. return -EINVAL;
  381. }
  382. for (j = i + 1; j < len; ++j) {
  383. if (nhg[i].id == nhg[j].id) {
  384. NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
  385. return -EINVAL;
  386. }
  387. }
  388. }
  389. if (tb[NHA_FDB])
  390. nhg_fdb = 1;
  391. nhg = nla_data(tb[NHA_GROUP]);
  392. for (i = 0; i < len; ++i) {
  393. struct nexthop *nh;
  394. bool is_fdb_nh;
  395. nh = nexthop_find_by_id(net, nhg[i].id);
  396. if (!nh) {
  397. NL_SET_ERR_MSG(extack, "Invalid nexthop id");
  398. return -EINVAL;
  399. }
  400. if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
  401. return -EINVAL;
  402. if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
  403. return -EINVAL;
  404. if (!nhg_fdb && is_fdb_nh) {
  405. NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
  406. return -EINVAL;
  407. }
  408. }
  409. for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
  410. if (!tb[i])
  411. continue;
  412. if (i == NHA_FDB)
  413. continue;
  414. NL_SET_ERR_MSG(extack,
  415. "No other attributes can be set in nexthop groups");
  416. return -EINVAL;
  417. }
  418. return 0;
  419. }
  420. static bool ipv6_good_nh(const struct fib6_nh *nh)
  421. {
  422. int state = NUD_REACHABLE;
  423. struct neighbour *n;
  424. rcu_read_lock_bh();
  425. n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
  426. if (n)
  427. state = n->nud_state;
  428. rcu_read_unlock_bh();
  429. return !!(state & NUD_VALID);
  430. }
  431. static bool ipv4_good_nh(const struct fib_nh *nh)
  432. {
  433. int state = NUD_REACHABLE;
  434. struct neighbour *n;
  435. rcu_read_lock_bh();
  436. n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
  437. (__force u32)nh->fib_nh_gw4);
  438. if (n)
  439. state = n->nud_state;
  440. rcu_read_unlock_bh();
  441. return !!(state & NUD_VALID);
  442. }
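/* Path selection is hash-threshold style: the flow hash is compared
 * against each entry's precomputed upper_bound (see nh_group_rebalance());
 * the first covering entry with a valid neighbour is used, and the first
 * covering entry is kept as a fallback if none has a valid neighbour.
 */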
  443. struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
  444. {
  445. struct nexthop *rc = NULL;
  446. struct nh_group *nhg;
  447. int i;
  448. if (!nh->is_group)
  449. return nh;
  450. nhg = rcu_dereference(nh->nh_grp);
  451. for (i = 0; i < nhg->num_nh; ++i) {
  452. struct nh_grp_entry *nhge = &nhg->nh_entries[i];
  453. struct nh_info *nhi;
  454. if (hash > atomic_read(&nhge->upper_bound))
  455. continue;
  456. nhi = rcu_dereference(nhge->nh->nh_info);
  457. if (nhi->fdb_nh)
  458. return nhge->nh;
  459. /* nexthop groups always check whether the nexthop is good;
  460. * they do not rely on a sysctl for this behavior
  461. */
  462. switch (nhi->family) {
  463. case AF_INET:
  464. if (ipv4_good_nh(&nhi->fib_nh))
  465. return nhge->nh;
  466. break;
  467. case AF_INET6:
  468. if (ipv6_good_nh(&nhi->fib6_nh))
  469. return nhge->nh;
  470. break;
  471. }
  472. if (!rc)
  473. rc = nhge->nh;
  474. }
  475. return rc;
  476. }
  477. EXPORT_SYMBOL_GPL(nexthop_select_path);
  478. int nexthop_for_each_fib6_nh(struct nexthop *nh,
  479. int (*cb)(struct fib6_nh *nh, void *arg),
  480. void *arg)
  481. {
  482. struct nh_info *nhi;
  483. int err;
  484. if (nh->is_group) {
  485. struct nh_group *nhg;
  486. int i;
  487. nhg = rcu_dereference_rtnl(nh->nh_grp);
  488. for (i = 0; i < nhg->num_nh; i++) {
  489. struct nh_grp_entry *nhge = &nhg->nh_entries[i];
  490. nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
  491. err = cb(&nhi->fib6_nh, arg);
  492. if (err)
  493. return err;
  494. }
  495. } else {
  496. nhi = rcu_dereference_rtnl(nh->nh_info);
  497. err = cb(&nhi->fib6_nh, arg);
  498. if (err)
  499. return err;
  500. }
  501. return 0;
  502. }
  503. EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
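/* A minimal usage sketch (hypothetical caller, names invented here):
 *
 *   static int flush_one_fib6_nh(struct fib6_nh *nh, void *arg)
 *   {
 *           return 0;    (a non-zero return stops the walk)
 *   }
 *
 *   err = nexthop_for_each_fib6_nh(nh, flush_one_fib6_nh, NULL);
 */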
  504. static int check_src_addr(const struct in6_addr *saddr,
  505. struct netlink_ext_ack *extack)
  506. {
  507. if (!ipv6_addr_any(saddr)) {
  508. NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
  509. return -EINVAL;
  510. }
  511. return 0;
  512. }
  513. int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
  514. struct netlink_ext_ack *extack)
  515. {
  516. struct nh_info *nhi;
  517. bool is_fdb_nh;
  518. /* fib6_src is unique to a fib6_info and limits the ability to cache
  519. * routes in fib6_nh within a nexthop that is potentially shared
  520. * across multiple fib entries. If the config wants to use source
  521. * routing it can not use nexthop objects. mlxsw also does not allow
  522. * fib6_src on routes.
  523. */
  524. if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
  525. return -EINVAL;
  526. if (nh->is_group) {
  527. struct nh_group *nhg;
  528. nhg = rtnl_dereference(nh->nh_grp);
  529. if (nhg->has_v4)
  530. goto no_v4_nh;
  531. is_fdb_nh = nhg->fdb_nh;
  532. } else {
  533. nhi = rtnl_dereference(nh->nh_info);
  534. if (nhi->family == AF_INET)
  535. goto no_v4_nh;
  536. is_fdb_nh = nhi->fdb_nh;
  537. }
  538. if (is_fdb_nh) {
  539. NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
  540. return -EINVAL;
  541. }
  542. return 0;
  543. no_v4_nh:
  544. NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
  545. return -EINVAL;
  546. }
  547. EXPORT_SYMBOL_GPL(fib6_check_nexthop);
  548. /* if existing nexthop has ipv6 routes linked to it, need
  549. * to verify this new spec works with ipv6
  550. */
  551. static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
  552. struct netlink_ext_ack *extack)
  553. {
  554. struct fib6_info *f6i;
  555. if (list_empty(&old->f6i_list))
  556. return 0;
  557. list_for_each_entry(f6i, &old->f6i_list, nh_list) {
  558. if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
  559. return -EINVAL;
  560. }
  561. return fib6_check_nexthop(new, NULL, extack);
  562. }
  563. static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
  564. struct netlink_ext_ack *extack)
  565. {
  566. if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
  567. NL_SET_ERR_MSG(extack,
  568. "Route with host scope can not have a gateway");
  569. return -EINVAL;
  570. }
  571. if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
  572. NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
  573. return -EINVAL;
  574. }
  575. return 0;
  576. }
  577. /* Invoked by fib add code to verify nexthop by id is ok with
  578. * config for prefix; parts of fib_check_nh not done when nexthop
  579. * object is used.
  580. */
  581. int fib_check_nexthop(struct nexthop *nh, u8 scope,
  582. struct netlink_ext_ack *extack)
  583. {
  584. struct nh_info *nhi;
  585. int err = 0;
  586. if (nh->is_group) {
  587. struct nh_group *nhg;
  588. nhg = rtnl_dereference(nh->nh_grp);
  589. if (nhg->fdb_nh) {
  590. NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
  591. err = -EINVAL;
  592. goto out;
  593. }
  594. if (scope == RT_SCOPE_HOST) {
  595. NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
  596. err = -EINVAL;
  597. goto out;
  598. }
  599. /* all nexthops in a group have the same scope */
  600. nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
  601. err = nexthop_check_scope(nhi, scope, extack);
  602. } else {
  603. nhi = rtnl_dereference(nh->nh_info);
  604. if (nhi->fdb_nh) {
  605. NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
  606. err = -EINVAL;
  607. goto out;
  608. }
  609. err = nexthop_check_scope(nhi, scope, extack);
  610. }
  611. out:
  612. return err;
  613. }
  614. static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
  615. struct netlink_ext_ack *extack)
  616. {
  617. struct fib_info *fi;
  618. list_for_each_entry(fi, &old->fi_list, nh_list) {
  619. int err;
  620. err = fib_check_nexthop(new, fi->fib_scope, extack);
  621. if (err)
  622. return err;
  623. }
  624. return 0;
  625. }
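/* Hash-threshold rebalance: each entry's upper_bound is its cumulative
 * weight scaled into the 31-bit hash space, roughly
 * (cum_weight << 31) / total_weight - 1, so every entry owns a slice of
 * hash values proportional to its weight.
 */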
  626. static void nh_group_rebalance(struct nh_group *nhg)
  627. {
  628. int total = 0;
  629. int w = 0;
  630. int i;
  631. for (i = 0; i < nhg->num_nh; ++i)
  632. total += nhg->nh_entries[i].weight;
  633. for (i = 0; i < nhg->num_nh; ++i) {
  634. struct nh_grp_entry *nhge = &nhg->nh_entries[i];
  635. int upper_bound;
  636. w += nhge->weight;
  637. upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
  638. atomic_set(&nhge->upper_bound, upper_bound);
  639. }
  640. }
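/* Removing a group member builds the updated entry array in the
 * preallocated nhg->spare group and publishes it with rcu_assign_pointer()
 * so readers never see a partially updated group.
 */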
  641. static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
  642. struct nl_info *nlinfo)
  643. {
  644. struct nh_grp_entry *nhges, *new_nhges;
  645. struct nexthop *nhp = nhge->nh_parent;
  646. struct nexthop *nh = nhge->nh;
  647. struct nh_group *nhg, *newg;
  648. int i, j;
  649. WARN_ON(!nh);
  650. nhg = rtnl_dereference(nhp->nh_grp);
  651. newg = nhg->spare;
  652. /* last entry, keep it visible and remove the parent */
  653. if (nhg->num_nh == 1) {
  654. remove_nexthop(net, nhp, nlinfo);
  655. return;
  656. }
  657. newg->has_v4 = false;
  658. newg->mpath = nhg->mpath;
  659. newg->fdb_nh = nhg->fdb_nh;
  660. newg->num_nh = nhg->num_nh;
  661. /* copy old entries to new except the one getting removed */
  662. nhges = nhg->nh_entries;
  663. new_nhges = newg->nh_entries;
  664. for (i = 0, j = 0; i < nhg->num_nh; ++i) {
  665. struct nh_info *nhi;
  666. /* current nexthop getting removed */
  667. if (nhg->nh_entries[i].nh == nh) {
  668. newg->num_nh--;
  669. continue;
  670. }
  671. nhi = rtnl_dereference(nhges[i].nh->nh_info);
  672. if (nhi->family == AF_INET)
  673. newg->has_v4 = true;
  674. list_del(&nhges[i].nh_list);
  675. new_nhges[j].nh_parent = nhges[i].nh_parent;
  676. new_nhges[j].nh = nhges[i].nh;
  677. new_nhges[j].weight = nhges[i].weight;
  678. list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
  679. j++;
  680. }
  681. nh_group_rebalance(newg);
  682. rcu_assign_pointer(nhp->nh_grp, newg);
  683. list_del(&nhge->nh_list);
  684. nexthop_put(nhge->nh);
  685. if (nlinfo)
  686. nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
  687. }
  688. static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
  689. struct nl_info *nlinfo)
  690. {
  691. struct nh_grp_entry *nhge, *tmp;
  692. list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
  693. remove_nh_grp_entry(net, nhge, nlinfo);
  694. /* make sure all see the newly published array before releasing rtnl */
  695. synchronize_net();
  696. }
  697. static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
  698. {
  699. struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
  700. int i, num_nh = nhg->num_nh;
  701. for (i = 0; i < num_nh; ++i) {
  702. struct nh_grp_entry *nhge = &nhg->nh_entries[i];
  703. if (WARN_ON(!nhge->nh))
  704. continue;
  705. list_del_init(&nhge->nh_list);
  706. }
  707. }
  708. /* not called for nexthop replace */
  709. static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
  710. {
  711. struct fib6_info *f6i, *tmp;
  712. bool do_flush = false;
  713. struct fib_info *fi;
  714. list_for_each_entry(fi, &nh->fi_list, nh_list) {
  715. fi->fib_flags |= RTNH_F_DEAD;
  716. do_flush = true;
  717. }
  718. if (do_flush)
  719. fib_flush(net);
  720. /* ip6_del_rt removes the entry from this list hence the _safe */
  721. list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
  722. /* __ip6_del_rt does a release, so do a hold here */
  723. fib6_info_hold(f6i);
  724. ipv6_stub->ip6_del_rt(net, f6i,
  725. !net->ipv4.sysctl_nexthop_compat_mode);
  726. }
  727. }
  728. static void __remove_nexthop(struct net *net, struct nexthop *nh,
  729. struct nl_info *nlinfo)
  730. {
  731. __remove_nexthop_fib(net, nh);
  732. if (nh->is_group) {
  733. remove_nexthop_group(nh, nlinfo);
  734. } else {
  735. struct nh_info *nhi;
  736. nhi = rtnl_dereference(nh->nh_info);
  737. if (nhi->fib_nhc.nhc_dev)
  738. hlist_del(&nhi->dev_hash);
  739. remove_nexthop_from_groups(net, nh, nlinfo);
  740. }
  741. }
  742. static void remove_nexthop(struct net *net, struct nexthop *nh,
  743. struct nl_info *nlinfo)
  744. {
  745. call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
  746. /* remove from the tree */
  747. rb_erase(&nh->rb_node, &net->nexthop.rb_root);
  748. if (nlinfo)
  749. nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
  750. __remove_nexthop(net, nh, nlinfo);
  751. nh_base_seq_inc(net);
  752. nexthop_put(nh);
  753. }
  754. /* if any FIB entries reference this nexthop, any dst entries
  755. * need to be regenerated
  756. */
  757. static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
  758. {
  759. struct fib6_info *f6i;
  760. if (!list_empty(&nh->fi_list))
  761. rt_cache_flush(net);
  762. list_for_each_entry(f6i, &nh->f6i_list, nh_list)
  763. ipv6_stub->fib6_update_sernum(net, f6i);
  764. }
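/* Replace works by swapping the nh_info/nh_grp payload between the old
 * and new nexthop: the old object stays in the id tree and keeps its id
 * and references, while the new object ends up holding the old payload
 * and is removed and released once the swap succeeds.
 */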
  765. static int replace_nexthop_grp(struct net *net, struct nexthop *old,
  766. struct nexthop *new,
  767. struct netlink_ext_ack *extack)
  768. {
  769. struct nh_group *oldg, *newg;
  770. int i;
  771. if (!new->is_group) {
  772. NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
  773. return -EINVAL;
  774. }
  775. oldg = rtnl_dereference(old->nh_grp);
  776. newg = rtnl_dereference(new->nh_grp);
  777. /* update parents - used by nexthop code for cleanup */
  778. for (i = 0; i < newg->num_nh; i++)
  779. newg->nh_entries[i].nh_parent = old;
  780. rcu_assign_pointer(old->nh_grp, newg);
  781. for (i = 0; i < oldg->num_nh; i++)
  782. oldg->nh_entries[i].nh_parent = new;
  783. rcu_assign_pointer(new->nh_grp, oldg);
  784. return 0;
  785. }
  786. static void nh_group_v4_update(struct nh_group *nhg)
  787. {
  788. struct nh_grp_entry *nhges;
  789. bool has_v4 = false;
  790. int i;
  791. nhges = nhg->nh_entries;
  792. for (i = 0; i < nhg->num_nh; i++) {
  793. struct nh_info *nhi;
  794. nhi = rtnl_dereference(nhges[i].nh->nh_info);
  795. if (nhi->family == AF_INET)
  796. has_v4 = true;
  797. }
  798. nhg->has_v4 = has_v4;
  799. }
  800. static int replace_nexthop_single(struct net *net, struct nexthop *old,
  801. struct nexthop *new,
  802. struct netlink_ext_ack *extack)
  803. {
  804. struct nh_info *oldi, *newi;
  805. if (new->is_group) {
  806. NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
  807. return -EINVAL;
  808. }
  809. oldi = rtnl_dereference(old->nh_info);
  810. newi = rtnl_dereference(new->nh_info);
  811. newi->nh_parent = old;
  812. oldi->nh_parent = new;
  813. old->protocol = new->protocol;
  814. old->nh_flags = new->nh_flags;
  815. rcu_assign_pointer(old->nh_info, newi);
  816. rcu_assign_pointer(new->nh_info, oldi);
  817. /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
  818. * update IPv4 indication in all the groups using the nexthop.
  819. */
  820. if (oldi->family == AF_INET && newi->family == AF_INET6) {
  821. struct nh_grp_entry *nhge;
  822. list_for_each_entry(nhge, &old->grp_list, nh_list) {
  823. struct nexthop *nhp = nhge->nh_parent;
  824. struct nh_group *nhg;
  825. nhg = rtnl_dereference(nhp->nh_grp);
  826. nh_group_v4_update(nhg);
  827. }
  828. }
  829. return 0;
  830. }
  831. static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
  832. struct nl_info *info)
  833. {
  834. struct fib6_info *f6i;
  835. if (!list_empty(&nh->fi_list)) {
  836. struct fib_info *fi;
  837. /* expectation is a few fib_info per nexthop and then
  838. * a lot of routes per fib_info. So mark the fib_info
  839. * and then walk the fib tables once
  840. */
  841. list_for_each_entry(fi, &nh->fi_list, nh_list)
  842. fi->nh_updated = true;
  843. fib_info_notify_update(net, info);
  844. list_for_each_entry(fi, &nh->fi_list, nh_list)
  845. fi->nh_updated = false;
  846. }
  847. list_for_each_entry(f6i, &nh->f6i_list, nh_list)
  848. ipv6_stub->fib6_rt_update(net, f6i, info);
  849. }
  850. /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
  851. * linked to this nexthop and for all groups that the nexthop
  852. * is a member of
  853. */
  854. static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
  855. struct nl_info *info)
  856. {
  857. struct nh_grp_entry *nhge;
  858. __nexthop_replace_notify(net, nh, info);
  859. list_for_each_entry(nhge, &nh->grp_list, nh_list)
  860. __nexthop_replace_notify(net, nhge->nh_parent, info);
  861. }
  862. static int replace_nexthop(struct net *net, struct nexthop *old,
  863. struct nexthop *new, struct netlink_ext_ack *extack)
  864. {
  865. bool new_is_reject = false;
  866. struct nh_grp_entry *nhge;
  867. int err;
  868. /* check that existing FIB entries are ok with the
  869. * new nexthop definition
  870. */
  871. err = fib_check_nh_list(old, new, extack);
  872. if (err)
  873. return err;
  874. err = fib6_check_nh_list(old, new, extack);
  875. if (err)
  876. return err;
  877. if (!new->is_group) {
  878. struct nh_info *nhi = rtnl_dereference(new->nh_info);
  879. new_is_reject = nhi->reject_nh;
  880. }
  881. list_for_each_entry(nhge, &old->grp_list, nh_list) {
  882. /* if new nexthop is a blackhole, any groups using this
  883. * nexthop cannot have more than 1 path
  884. */
  885. if (new_is_reject &&
  886. nexthop_num_path(nhge->nh_parent) > 1) {
  887. NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
  888. return -EINVAL;
  889. }
  890. err = fib_check_nh_list(nhge->nh_parent, new, extack);
  891. if (err)
  892. return err;
  893. err = fib6_check_nh_list(nhge->nh_parent, new, extack);
  894. if (err)
  895. return err;
  896. }
  897. if (old->is_group)
  898. err = replace_nexthop_grp(net, old, new, extack);
  899. else
  900. err = replace_nexthop_single(net, old, new, extack);
  901. if (!err) {
  902. nh_rt_cache_flush(net, old);
  903. __remove_nexthop(net, new, NULL);
  904. nexthop_put(new);
  905. }
  906. return err;
  907. }
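/* Insertion walks the id-ordered rb-tree: an existing id is replaced in
 * place only when NLM_F_REPLACE is set (-EEXIST otherwise), and a replace
 * without NLM_F_CREATE fails with -ENOENT when the id does not exist yet.
 */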
  908. /* called with rtnl_lock held */
  909. static int insert_nexthop(struct net *net, struct nexthop *new_nh,
  910. struct nh_config *cfg, struct netlink_ext_ack *extack)
  911. {
  912. struct rb_node **pp, *parent = NULL, *next;
  913. struct rb_root *root = &net->nexthop.rb_root;
  914. bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
  915. bool create = !!(cfg->nlflags & NLM_F_CREATE);
  916. u32 new_id = new_nh->id;
  917. int replace_notify = 0;
  918. int rc = -EEXIST;
  919. pp = &root->rb_node;
  920. while (1) {
  921. struct nexthop *nh;
  922. next = *pp;
  923. if (!next)
  924. break;
  925. parent = next;
  926. nh = rb_entry(parent, struct nexthop, rb_node);
  927. if (new_id < nh->id) {
  928. pp = &next->rb_left;
  929. } else if (new_id > nh->id) {
  930. pp = &next->rb_right;
  931. } else if (replace) {
  932. rc = replace_nexthop(net, nh, new_nh, extack);
  933. if (!rc) {
  934. new_nh = nh; /* send notification with old nh */
  935. replace_notify = 1;
  936. }
  937. goto out;
  938. } else {
  939. /* id already exists and not a replace */
  940. goto out;
  941. }
  942. }
  943. if (replace && !create) {
  944. NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
  945. rc = -ENOENT;
  946. goto out;
  947. }
  948. rb_link_node_rcu(&new_nh->rb_node, parent, pp);
  949. rb_insert_color(&new_nh->rb_node, root);
  950. rc = 0;
  951. out:
  952. if (!rc) {
  953. nh_base_seq_inc(net);
  954. nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
  955. if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
  956. nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
  957. }
  958. return rc;
  959. }
  960. /* rtnl */
  961. /* remove all nexthops tied to a device being deleted */
  962. static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
  963. {
  964. unsigned int hash = nh_dev_hashfn(dev->ifindex);
  965. struct net *net = dev_net(dev);
  966. struct hlist_head *head = &net->nexthop.devhash[hash];
  967. struct hlist_node *n;
  968. struct nh_info *nhi;
  969. hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
  970. if (nhi->fib_nhc.nhc_dev != dev)
  971. continue;
  972. if (nhi->reject_nh &&
  973. (event == NETDEV_DOWN || event == NETDEV_CHANGE))
  974. continue;
  975. remove_nexthop(net, nhi->nh_parent, NULL);
  976. }
  977. }
  978. /* rtnl; called when net namespace is deleted */
  979. static void flush_all_nexthops(struct net *net)
  980. {
  981. struct rb_root *root = &net->nexthop.rb_root;
  982. struct rb_node *node;
  983. struct nexthop *nh;
  984. while ((node = rb_first(root))) {
  985. nh = rb_entry(node, struct nexthop, rb_node);
  986. remove_nexthop(net, nh, NULL);
  987. cond_resched();
  988. }
  989. }
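/* Group creation: every member id must already exist (-ENOENT otherwise),
 * a reference is taken on each member, and a spare nh_group of the same
 * size is allocated up front so later removals cannot fail on allocation.
 */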
  990. static struct nexthop *nexthop_create_group(struct net *net,
  991. struct nh_config *cfg)
  992. {
  993. struct nlattr *grps_attr = cfg->nh_grp;
  994. struct nexthop_grp *entry = nla_data(grps_attr);
  995. u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
  996. struct nh_group *nhg;
  997. struct nexthop *nh;
  998. int i;
  999. if (WARN_ON(!num_nh))
  1000. return ERR_PTR(-EINVAL);
  1001. nh = nexthop_alloc();
  1002. if (!nh)
  1003. return ERR_PTR(-ENOMEM);
  1004. nh->is_group = 1;
  1005. nhg = nexthop_grp_alloc(num_nh);
  1006. if (!nhg) {
  1007. kfree(nh);
  1008. return ERR_PTR(-ENOMEM);
  1009. }
  1010. /* spare group used for removals */
  1011. nhg->spare = nexthop_grp_alloc(num_nh);
  1012. if (!nhg->spare) {
  1013. kfree(nhg);
  1014. kfree(nh);
  1015. return ERR_PTR(-ENOMEM);
  1016. }
  1017. nhg->spare->spare = nhg;
  1018. for (i = 0; i < nhg->num_nh; ++i) {
  1019. struct nexthop *nhe;
  1020. struct nh_info *nhi;
  1021. nhe = nexthop_find_by_id(net, entry[i].id);
  1022. if (!nexthop_get(nhe))
  1023. goto out_no_nh;
  1024. nhi = rtnl_dereference(nhe->nh_info);
  1025. if (nhi->family == AF_INET)
  1026. nhg->has_v4 = true;
  1027. nhg->nh_entries[i].nh = nhe;
  1028. nhg->nh_entries[i].weight = entry[i].weight + 1;
  1029. list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
  1030. nhg->nh_entries[i].nh_parent = nh;
  1031. }
  1032. if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
  1033. nhg->mpath = 1;
  1034. nh_group_rebalance(nhg);
  1035. }
  1036. if (cfg->nh_fdb)
  1037. nhg->fdb_nh = 1;
  1038. rcu_assign_pointer(nh->nh_grp, nhg);
  1039. return nh;
  1040. out_no_nh:
  1041. for (i--; i >= 0; --i) {
  1042. list_del(&nhg->nh_entries[i].nh_list);
  1043. nexthop_put(nhg->nh_entries[i].nh);
  1044. }
  1045. kfree(nhg->spare);
  1046. kfree(nhg);
  1047. kfree(nh);
  1048. return ERR_PTR(-ENOENT);
  1049. }
  1050. static int nh_create_ipv4(struct net *net, struct nexthop *nh,
  1051. struct nh_info *nhi, struct nh_config *cfg,
  1052. struct netlink_ext_ack *extack)
  1053. {
  1054. struct fib_nh *fib_nh = &nhi->fib_nh;
  1055. struct fib_config fib_cfg = {
  1056. .fc_oif = cfg->nh_ifindex,
  1057. .fc_gw4 = cfg->gw.ipv4,
  1058. .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
  1059. .fc_flags = cfg->nh_flags,
  1060. .fc_nlinfo = cfg->nlinfo,
  1061. .fc_encap = cfg->nh_encap,
  1062. .fc_encap_type = cfg->nh_encap_type,
  1063. };
  1064. u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
  1065. int err;
  1066. err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
  1067. if (err) {
  1068. fib_nh_release(net, fib_nh);
  1069. goto out;
  1070. }
  1071. if (nhi->fdb_nh)
  1072. goto out;
  1073. /* sets nh_dev if successful */
  1074. err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
  1075. if (!err) {
  1076. nh->nh_flags = fib_nh->fib_nh_flags;
  1077. fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
  1078. fib_nh->fib_nh_scope);
  1079. } else {
  1080. fib_nh_release(net, fib_nh);
  1081. }
  1082. out:
  1083. return err;
  1084. }
  1085. static int nh_create_ipv6(struct net *net, struct nexthop *nh,
  1086. struct nh_info *nhi, struct nh_config *cfg,
  1087. struct netlink_ext_ack *extack)
  1088. {
  1089. struct fib6_nh *fib6_nh = &nhi->fib6_nh;
  1090. struct fib6_config fib6_cfg = {
  1091. .fc_table = l3mdev_fib_table(cfg->dev),
  1092. .fc_ifindex = cfg->nh_ifindex,
  1093. .fc_gateway = cfg->gw.ipv6,
  1094. .fc_flags = cfg->nh_flags,
  1095. .fc_nlinfo = cfg->nlinfo,
  1096. .fc_encap = cfg->nh_encap,
  1097. .fc_encap_type = cfg->nh_encap_type,
  1098. .fc_is_fdb = cfg->nh_fdb,
  1099. };
  1100. int err;
  1101. if (!ipv6_addr_any(&cfg->gw.ipv6))
  1102. fib6_cfg.fc_flags |= RTF_GATEWAY;
  1103. /* sets nh_dev if successful */
  1104. err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
  1105. extack);
  1106. if (err) {
  1107. /* IPv6 is not enabled, don't call fib6_nh_release */
  1108. if (err == -EAFNOSUPPORT)
  1109. goto out;
  1110. ipv6_stub->fib6_nh_release(fib6_nh);
  1111. } else {
  1112. nh->nh_flags = fib6_nh->fib_nh_flags;
  1113. }
  1114. out:
  1115. return err;
  1116. }
  1117. static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
  1118. struct netlink_ext_ack *extack)
  1119. {
  1120. struct nh_info *nhi;
  1121. struct nexthop *nh;
  1122. int err = 0;
  1123. nh = nexthop_alloc();
  1124. if (!nh)
  1125. return ERR_PTR(-ENOMEM);
  1126. nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
  1127. if (!nhi) {
  1128. kfree(nh);
  1129. return ERR_PTR(-ENOMEM);
  1130. }
  1131. nh->nh_flags = cfg->nh_flags;
  1132. nh->net = net;
  1133. nhi->nh_parent = nh;
  1134. nhi->family = cfg->nh_family;
  1135. nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
  1136. if (cfg->nh_fdb)
  1137. nhi->fdb_nh = 1;
  1138. if (cfg->nh_blackhole) {
  1139. nhi->reject_nh = 1;
  1140. cfg->nh_ifindex = net->loopback_dev->ifindex;
  1141. }
  1142. switch (cfg->nh_family) {
  1143. case AF_INET:
  1144. err = nh_create_ipv4(net, nh, nhi, cfg, extack);
  1145. break;
  1146. case AF_INET6:
  1147. err = nh_create_ipv6(net, nh, nhi, cfg, extack);
  1148. break;
  1149. }
  1150. if (err) {
  1151. kfree(nhi);
  1152. kfree(nh);
  1153. return ERR_PTR(err);
  1154. }
  1155. /* add the entry to the device based hash */
  1156. if (!nhi->fdb_nh)
  1157. nexthop_devhash_add(net, nhi);
  1158. rcu_assign_pointer(nh->nh_info, nhi);
  1159. return nh;
  1160. }
  1161. /* called with rtnl lock held */
  1162. static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
  1163. struct netlink_ext_ack *extack)
  1164. {
  1165. struct nexthop *nh;
  1166. int err;
  1167. if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
  1168. NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
  1169. return ERR_PTR(-EINVAL);
  1170. }
  1171. if (!cfg->nh_id) {
  1172. cfg->nh_id = nh_find_unused_id(net);
  1173. if (!cfg->nh_id) {
  1174. NL_SET_ERR_MSG(extack, "No unused id");
  1175. return ERR_PTR(-EINVAL);
  1176. }
  1177. }
  1178. if (cfg->nh_grp)
  1179. nh = nexthop_create_group(net, cfg);
  1180. else
  1181. nh = nexthop_create(net, cfg, extack);
  1182. if (IS_ERR(nh))
  1183. return nh;
  1184. refcount_set(&nh->refcnt, 1);
  1185. nh->id = cfg->nh_id;
  1186. nh->protocol = cfg->nh_protocol;
  1187. nh->net = net;
  1188. err = insert_nexthop(net, nh, cfg, extack);
  1189. if (err) {
  1190. __remove_nexthop(net, nh, NULL);
  1191. nexthop_put(nh);
  1192. nh = ERR_PTR(err);
  1193. }
  1194. return nh;
  1195. }
  1196. static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
  1197. struct nlmsghdr *nlh, struct nh_config *cfg,
  1198. struct netlink_ext_ack *extack)
  1199. {
  1200. struct nhmsg *nhm = nlmsg_data(nlh);
  1201. struct nlattr *tb[NHA_MAX + 1];
  1202. int err;
  1203. err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
  1204. extack);
  1205. if (err < 0)
  1206. return err;
  1207. err = -EINVAL;
  1208. if (nhm->resvd || nhm->nh_scope) {
  1209. NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
  1210. goto out;
  1211. }
  1212. if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
  1213. NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
  1214. goto out;
  1215. }
  1216. switch (nhm->nh_family) {
  1217. case AF_INET:
  1218. case AF_INET6:
  1219. break;
  1220. case AF_UNSPEC:
  1221. if (tb[NHA_GROUP])
  1222. break;
  1223. fallthrough;
  1224. default:
  1225. NL_SET_ERR_MSG(extack, "Invalid address family");
  1226. goto out;
  1227. }
  1228. if (tb[NHA_GROUPS] || tb[NHA_MASTER]) {
  1229. NL_SET_ERR_MSG(extack, "Invalid attributes in request");
  1230. goto out;
  1231. }
  1232. memset(cfg, 0, sizeof(*cfg));
  1233. cfg->nlflags = nlh->nlmsg_flags;
  1234. cfg->nlinfo.portid = NETLINK_CB(skb).portid;
  1235. cfg->nlinfo.nlh = nlh;
  1236. cfg->nlinfo.nl_net = net;
  1237. cfg->nh_family = nhm->nh_family;
  1238. cfg->nh_protocol = nhm->nh_protocol;
  1239. cfg->nh_flags = nhm->nh_flags;
  1240. if (tb[NHA_ID])
  1241. cfg->nh_id = nla_get_u32(tb[NHA_ID]);
  1242. if (tb[NHA_FDB]) {
  1243. if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
  1244. tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
  1245. NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
  1246. goto out;
  1247. }
  1248. if (nhm->nh_flags) {
  1249. NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
  1250. goto out;
  1251. }
  1252. cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
  1253. }
  1254. if (tb[NHA_GROUP]) {
  1255. if (nhm->nh_family != AF_UNSPEC) {
  1256. NL_SET_ERR_MSG(extack, "Invalid family for group");
  1257. goto out;
  1258. }
  1259. cfg->nh_grp = tb[NHA_GROUP];
  1260. cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
  1261. if (tb[NHA_GROUP_TYPE])
  1262. cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
  1263. if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
  1264. NL_SET_ERR_MSG(extack, "Invalid group type");
  1265. goto out;
  1266. }
  1267. err = nh_check_attr_group(net, tb, extack);
  1268. /* no other attributes should be set */
  1269. goto out;
  1270. }
  1271. if (tb[NHA_BLACKHOLE]) {
  1272. if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
  1273. tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
  1274. NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
  1275. goto out;
  1276. }
  1277. cfg->nh_blackhole = 1;
  1278. err = 0;
  1279. goto out;
  1280. }
  1281. if (!cfg->nh_fdb && !tb[NHA_OIF]) {
  1282. NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
  1283. goto out;
  1284. }
  1285. if (!cfg->nh_fdb && tb[NHA_OIF]) {
  1286. cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
  1287. if (cfg->nh_ifindex)
  1288. cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
  1289. if (!cfg->dev) {
  1290. NL_SET_ERR_MSG(extack, "Invalid device index");
  1291. goto out;
  1292. } else if (!(cfg->dev->flags & IFF_UP)) {
  1293. NL_SET_ERR_MSG(extack, "Nexthop device is not up");
  1294. err = -ENETDOWN;
  1295. goto out;
  1296. } else if (!netif_carrier_ok(cfg->dev)) {
  1297. NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
  1298. err = -ENETDOWN;
  1299. goto out;
  1300. }
  1301. }
  1302. err = -EINVAL;
  1303. if (tb[NHA_GATEWAY]) {
  1304. struct nlattr *gwa = tb[NHA_GATEWAY];
  1305. switch (cfg->nh_family) {
  1306. case AF_INET:
  1307. if (nla_len(gwa) != sizeof(u32)) {
  1308. NL_SET_ERR_MSG(extack, "Invalid gateway");
  1309. goto out;
  1310. }
  1311. cfg->gw.ipv4 = nla_get_be32(gwa);
  1312. break;
  1313. case AF_INET6:
  1314. if (nla_len(gwa) != sizeof(struct in6_addr)) {
  1315. NL_SET_ERR_MSG(extack, "Invalid gateway");
  1316. goto out;
  1317. }
  1318. cfg->gw.ipv6 = nla_get_in6_addr(gwa);
  1319. break;
  1320. default:
  1321. NL_SET_ERR_MSG(extack,
  1322. "Unknown address family for gateway");
  1323. goto out;
  1324. }
  1325. } else {
  1326. /* device only nexthop (no gateway) */
  1327. if (cfg->nh_flags & RTNH_F_ONLINK) {
  1328. NL_SET_ERR_MSG(extack,
  1329. "ONLINK flag can not be set for nexthop without a gateway");
  1330. goto out;
  1331. }
  1332. }
  1333. if (tb[NHA_ENCAP]) {
  1334. cfg->nh_encap = tb[NHA_ENCAP];
  1335. if (!tb[NHA_ENCAP_TYPE]) {
  1336. NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
  1337. goto out;
  1338. }
  1339. cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
  1340. err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
  1341. if (err < 0)
  1342. goto out;
  1343. } else if (tb[NHA_ENCAP_TYPE]) {
  1344. NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
  1345. goto out;
  1346. }
  1347. err = 0;
  1348. out:
  1349. return err;
  1350. }
  1351. /* rtnl */
  1352. static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
  1353. struct netlink_ext_ack *extack)
  1354. {
  1355. struct net *net = sock_net(skb->sk);
  1356. struct nh_config cfg;
  1357. struct nexthop *nh;
  1358. int err;
  1359. err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
  1360. if (!err) {
  1361. nh = nexthop_add(net, &cfg, extack);
  1362. if (IS_ERR(nh))
  1363. err = PTR_ERR(nh);
  1364. }
  1365. return err;
  1366. }
  1367. static int nh_valid_get_del_req(struct nlmsghdr *nlh, u32 *id,
  1368. struct netlink_ext_ack *extack)
  1369. {
  1370. struct nhmsg *nhm = nlmsg_data(nlh);
  1371. struct nlattr *tb[NHA_MAX + 1];
  1372. int err, i;
  1373. err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
  1374. extack);
  1375. if (err < 0)
  1376. return err;
  1377. err = -EINVAL;
  1378. for (i = 0; i < __NHA_MAX; ++i) {
  1379. if (!tb[i])
  1380. continue;
  1381. switch (i) {
  1382. case NHA_ID:
  1383. break;
  1384. default:
  1385. NL_SET_ERR_MSG_ATTR(extack, tb[i],
  1386. "Unexpected attribute in request");
  1387. goto out;
  1388. }
  1389. }
  1390. if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
  1391. NL_SET_ERR_MSG(extack, "Invalid values in header");
  1392. goto out;
  1393. }
  1394. if (!tb[NHA_ID]) {
  1395. NL_SET_ERR_MSG(extack, "Nexthop id is missing");
  1396. goto out;
  1397. }
  1398. *id = nla_get_u32(tb[NHA_ID]);
  1399. if (!(*id))
  1400. NL_SET_ERR_MSG(extack, "Invalid nexthop id");
  1401. else
  1402. err = 0;
  1403. out:
  1404. return err;
  1405. }
  1406. /* rtnl */
  1407. static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
  1408. struct netlink_ext_ack *extack)
  1409. {
  1410. struct net *net = sock_net(skb->sk);
  1411. struct nl_info nlinfo = {
  1412. .nlh = nlh,
  1413. .nl_net = net,
  1414. .portid = NETLINK_CB(skb).portid,
  1415. };
  1416. struct nexthop *nh;
  1417. int err;
  1418. u32 id;
  1419. err = nh_valid_get_del_req(nlh, &id, extack);
  1420. if (err)
  1421. return err;
  1422. nh = nexthop_find_by_id(net, id);
  1423. if (!nh)
  1424. return -ENOENT;
  1425. remove_nexthop(net, nh, &nlinfo);
  1426. return 0;
  1427. }
  1428. /* rtnl */
  1429. static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
  1430. struct netlink_ext_ack *extack)
  1431. {
  1432. struct net *net = sock_net(in_skb->sk);
  1433. struct sk_buff *skb = NULL;
  1434. struct nexthop *nh;
  1435. int err;
  1436. u32 id;
  1437. err = nh_valid_get_del_req(nlh, &id, extack);
  1438. if (err)
  1439. return err;
  1440. err = -ENOBUFS;
  1441. skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
  1442. if (!skb)
  1443. goto out;
  1444. err = -ENOENT;
  1445. nh = nexthop_find_by_id(net, id);
  1446. if (!nh)
  1447. goto errout_free;
  1448. err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
  1449. nlh->nlmsg_seq, 0);
  1450. if (err < 0) {
  1451. WARN_ON(err == -EMSGSIZE);
  1452. goto errout_free;
  1453. }
  1454. err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
  1455. out:
  1456. return err;
  1457. errout_free:
  1458. kfree_skb(skb);
  1459. goto out;
  1460. }
  1461. static bool nh_dump_filtered(struct nexthop *nh, int dev_idx, int master_idx,
  1462. bool group_filter, u8 family)
  1463. {
  1464. const struct net_device *dev;
  1465. const struct nh_info *nhi;
  1466. if (group_filter && !nh->is_group)
  1467. return true;
  1468. if (!dev_idx && !master_idx && !family)
  1469. return false;
  1470. if (nh->is_group)
  1471. return true;
  1472. nhi = rtnl_dereference(nh->nh_info);
  1473. if (family && nhi->family != family)
  1474. return true;
  1475. dev = nhi->fib_nhc.nhc_dev;
  1476. if (dev_idx && (!dev || dev->ifindex != dev_idx))
  1477. return true;
  1478. if (master_idx) {
  1479. struct net_device *master;
  1480. if (!dev)
  1481. return true;
  1482. master = netdev_master_upper_dev_get((struct net_device *)dev);
  1483. if (!master || master->ifindex != master_idx)
  1484. return true;
  1485. }
  1486. return false;
  1487. }
  1488. static int nh_valid_dump_req(const struct nlmsghdr *nlh, int *dev_idx,
  1489. int *master_idx, bool *group_filter,
  1490. bool *fdb_filter, struct netlink_callback *cb)
  1491. {
  1492. struct netlink_ext_ack *extack = cb->extack;
  1493. struct nlattr *tb[NHA_MAX + 1];
  1494. struct nhmsg *nhm;
  1495. int err, i;
  1496. u32 idx;
  1497. err = nlmsg_parse(nlh, sizeof(*nhm), tb, NHA_MAX, rtm_nh_policy,
  1498. NULL);
  1499. if (err < 0)
  1500. return err;
  1501. for (i = 0; i <= NHA_MAX; ++i) {
  1502. if (!tb[i])
  1503. continue;
  1504. switch (i) {
  1505. case NHA_OIF:
  1506. idx = nla_get_u32(tb[i]);
  1507. if (idx > INT_MAX) {
  1508. NL_SET_ERR_MSG(extack, "Invalid device index");
  1509. return -EINVAL;
  1510. }
  1511. *dev_idx = idx;
  1512. break;
  1513. case NHA_MASTER:
  1514. idx = nla_get_u32(tb[i]);
  1515. if (idx > INT_MAX) {
  1516. NL_SET_ERR_MSG(extack, "Invalid master device index");
  1517. return -EINVAL;
  1518. }
  1519. *master_idx = idx;
  1520. break;
  1521. case NHA_GROUPS:
  1522. *group_filter = true;
  1523. break;
  1524. case NHA_FDB:
  1525. *fdb_filter = true;
  1526. break;
  1527. default:
  1528. NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request");
  1529. return -EINVAL;
  1530. }
  1531. }
  1532. nhm = nlmsg_data(nlh);
  1533. if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
  1534. NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
  1535. return -EINVAL;
  1536. }
  1537. return 0;
  1538. }
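/* Dumps resume from cb->args[0] and tag the result with net->nexthop.seq
 * via nl_dump_check_consistent() so userspace can detect a dump that
 * raced with changes.
 */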
  1539. /* rtnl */
  1540. static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
  1541. {
  1542. bool group_filter = false, fdb_filter = false;
  1543. struct nhmsg *nhm = nlmsg_data(cb->nlh);
  1544. int dev_filter_idx = 0, master_idx = 0;
  1545. struct net *net = sock_net(skb->sk);
  1546. struct rb_root *root = &net->nexthop.rb_root;
  1547. struct rb_node *node;
  1548. int idx = 0, s_idx;
  1549. int err;
  1550. err = nh_valid_dump_req(cb->nlh, &dev_filter_idx, &master_idx,
  1551. &group_filter, &fdb_filter, cb);
  1552. if (err < 0)
  1553. return err;
  1554. s_idx = cb->args[0];
  1555. for (node = rb_first(root); node; node = rb_next(node)) {
  1556. struct nexthop *nh;
  1557. if (idx < s_idx)
  1558. goto cont;
  1559. nh = rb_entry(node, struct nexthop, rb_node);
  1560. if (nh_dump_filtered(nh, dev_filter_idx, master_idx,
  1561. group_filter, nhm->nh_family))
  1562. goto cont;
  1563. err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
  1564. NETLINK_CB(cb->skb).portid,
  1565. cb->nlh->nlmsg_seq, NLM_F_MULTI);
  1566. if (err < 0) {
  1567. if (likely(skb->len))
  1568. goto out;
  1569. goto out_err;
  1570. }
  1571. cont:
  1572. idx++;
  1573. }
  1574. out:
  1575. err = skb->len;
  1576. out_err:
  1577. cb->args[0] = idx;
  1578. cb->seq = net->nexthop.seq;
  1579. nl_dump_check_consistent(cb, nlmsg_hdr(skb));
  1580. return err;
  1581. }
  1582. static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
  1583. {
  1584. unsigned int hash = nh_dev_hashfn(dev->ifindex);
  1585. struct net *net = dev_net(dev);
  1586. struct hlist_head *head = &net->nexthop.devhash[hash];
  1587. struct hlist_node *n;
  1588. struct nh_info *nhi;
  1589. hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
  1590. if (nhi->fib_nhc.nhc_dev == dev) {
  1591. if (nhi->family == AF_INET)
  1592. fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
  1593. orig_mtu);
  1594. }
  1595. }
  1596. }
  1597. /* rtnl */
  1598. static int nh_netdev_event(struct notifier_block *this,
  1599. unsigned long event, void *ptr)
  1600. {
  1601. struct net_device *dev = netdev_notifier_info_to_dev(ptr);
  1602. struct netdev_notifier_info_ext *info_ext;
  1603. switch (event) {
  1604. case NETDEV_DOWN:
  1605. case NETDEV_UNREGISTER:
  1606. nexthop_flush_dev(dev, event);
  1607. break;
  1608. case NETDEV_CHANGE:
  1609. if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
  1610. nexthop_flush_dev(dev, event);
  1611. break;
  1612. case NETDEV_CHANGEMTU:
  1613. info_ext = ptr;
  1614. nexthop_sync_mtu(dev, info_ext->ext.mtu);
  1615. rt_cache_flush(dev_net(dev));
  1616. break;
  1617. }
  1618. return NOTIFY_DONE;
  1619. }
  1620. static struct notifier_block nh_netdev_notifier = {
  1621. .notifier_call = nh_netdev_event,
  1622. };
  1623. int register_nexthop_notifier(struct net *net, struct notifier_block *nb)
  1624. {
  1625. return blocking_notifier_chain_register(&net->nexthop.notifier_chain,
  1626. nb);
  1627. }
  1628. EXPORT_SYMBOL(register_nexthop_notifier);
  1629. int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
  1630. {
  1631. return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
  1632. nb);
  1633. }
  1634. EXPORT_SYMBOL(unregister_nexthop_notifier);
  1635. static void __net_exit nexthop_net_exit(struct net *net)
  1636. {
  1637. rtnl_lock();
  1638. flush_all_nexthops(net);
  1639. rtnl_unlock();
  1640. kfree(net->nexthop.devhash);
  1641. }
  1642. static int __net_init nexthop_net_init(struct net *net)
  1643. {
  1644. size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;
  1645. net->nexthop.rb_root = RB_ROOT;
  1646. net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
  1647. if (!net->nexthop.devhash)
  1648. return -ENOMEM;
  1649. BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
  1650. return 0;
  1651. }
  1652. static struct pernet_operations nexthop_net_ops = {
  1653. .init = nexthop_net_init,
  1654. .exit = nexthop_net_exit,
  1655. };
  1656. static int __init nexthop_init(void)
  1657. {
  1658. register_pernet_subsys(&nexthop_net_ops);
  1659. register_netdevice_notifier(&nh_netdev_notifier);
  1660. rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
  1661. rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
  1662. rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
  1663. rtm_dump_nexthop, 0);
  1664. rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
  1665. rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
  1666. rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
  1667. rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);
  1668. return 0;
  1669. }
  1670. subsys_initcall(nexthop_init);