  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * xfrm_policy.c
  4. *
  5. * Changes:
  6. * Mitsuru KANDA @USAGI
  7. * Kazunori MIYAZAWA @USAGI
  8. * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
  9. * IPv6 support
  10. * Kazunori MIYAZAWA @USAGI
  11. * YOSHIFUJI Hideaki
  12. * Split up af-specific portion
  13. * Derek Atkins <derek@ihtfp.com> Add the post_input processor
  14. *
  15. */
  16. #include <linux/err.h>
  17. #include <linux/slab.h>
  18. #include <linux/kmod.h>
  19. #include <linux/list.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/workqueue.h>
  22. #include <linux/notifier.h>
  23. #include <linux/netdevice.h>
  24. #include <linux/netfilter.h>
  25. #include <linux/module.h>
  26. #include <linux/cache.h>
  27. #include <linux/cpu.h>
  28. #include <linux/audit.h>
  29. #include <linux/rhashtable.h>
  30. #include <linux/if_tunnel.h>
  31. #include <net/dst.h>
  32. #include <net/flow.h>
  33. #ifndef __GENKSYMS__
  34. #include <net/inet_ecn.h>
  35. #endif
  36. #include <net/xfrm.h>
  37. #include <net/ip.h>
  38. #ifndef __GENKSYMS__
  39. #include <net/gre.h>
  40. #endif
  41. #if IS_ENABLED(CONFIG_IPV6_MIP6)
  42. #include <net/mip6.h>
  43. #endif
  44. #ifdef CONFIG_XFRM_STATISTICS
  45. #include <net/snmp.h>
  46. #endif
  47. #ifdef CONFIG_XFRM_ESPINTCP
  48. #include <net/espintcp.h>
  49. #endif
  50. #include "xfrm_hash.h"
  51. #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
  52. #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
  53. #define XFRM_MAX_QUEUE_LEN 100
  54. struct xfrm_flo {
  55. struct dst_entry *dst_orig;
  56. u8 flags;
  57. };
  58. /* prefixes smaller than this are stored in lists, not trees. */
  59. #define INEXACT_PREFIXLEN_IPV4 16
  60. #define INEXACT_PREFIXLEN_IPV6 48
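/* For example, with the thresholds above an IPv4 selector of 10.0.0.0/8
 * (prefix shorter than 16) stays on the plain hlist of its bin, while a
 * 10.1.2.0/24 selector is placed in the rbtree sorted by address/prefix.
 * (Addresses are illustrative only.)
 */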
  61. struct xfrm_pol_inexact_node {
  62. struct rb_node node;
  63. union {
  64. xfrm_address_t addr;
  65. struct rcu_head rcu;
  66. };
  67. u8 prefixlen;
  68. struct rb_root root;
  69. /* the policies matching this node; may be an empty list */
  70. struct hlist_head hhead;
  71. };
  72. /* xfrm inexact policy search tree:
  73. * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
  74. * |
  75. * +---- root_d: sorted by daddr:prefix
  76. * | |
  77. * | xfrm_pol_inexact_node
  78. * | |
  79. * | +- root: sorted by saddr/prefix
  80. * | | |
  81. * | | xfrm_pol_inexact_node
  82. * | | |
  83. * | | + root: unused
  84. * | | |
  85. * | | + hhead: saddr:daddr policies
  86. * | |
  87. * | +- coarse policies and all any:daddr policies
  88. * |
  89. * +---- root_s: sorted by saddr:prefix
  90. * | |
  91. * | xfrm_pol_inexact_node
  92. * | |
  93. * | + root: unused
  94. * | |
  95. * | + hhead: saddr:any policies
  96. * |
  97. * +---- coarse policies and all any:any policies
  98. *
  99. * Lookups return four candidate lists:
  100. * 1. any:any list from top-level xfrm_pol_inexact_bin
  101. * 2. any:daddr list from daddr tree
  102. * 3. saddr:daddr list from 2nd level daddr tree
  103. * 4. saddr:any list from saddr tree
  104. *
  105. * This result set then needs to be searched for the policy with
  106. * the lowest priority. If two results have the same priority, the youngest one wins.
  107. */
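/* Example lookup (hypothetical addresses): a search of one bin for
 * saddr 192.0.2.1 / daddr 198.51.100.1 can yield up to four hlists --
 * the bin's own any:any list, the any:daddr list of a 198.51.100.0/24 node
 * in root_d, the saddr:daddr list of a 192.0.2.0/24 node nested below it,
 * and the saddr:any list of a 192.0.2.0/24 node in root_s.  All four are
 * walked to find the matching policy with the lowest priority.
 */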
  108. struct xfrm_pol_inexact_key {
  109. possible_net_t net;
  110. u32 if_id;
  111. u16 family;
  112. u8 dir, type;
  113. };
  114. struct xfrm_pol_inexact_bin {
  115. struct xfrm_pol_inexact_key k;
  116. struct rhash_head head;
  117. /* list containing '*:*' policies */
  118. struct hlist_head hhead;
  119. seqcount_spinlock_t count;
  120. /* tree sorted by daddr/prefix */
  121. struct rb_root root_d;
  122. /* tree sorted by saddr/prefix */
  123. struct rb_root root_s;
  124. /* slow path below */
  125. struct list_head inexact_bins;
  126. struct rcu_head rcu;
  127. };
  128. enum xfrm_pol_inexact_candidate_type {
  129. XFRM_POL_CAND_BOTH,
  130. XFRM_POL_CAND_SADDR,
  131. XFRM_POL_CAND_DADDR,
  132. XFRM_POL_CAND_ANY,
  133. XFRM_POL_CAND_MAX,
  134. };
  135. struct xfrm_pol_inexact_candidates {
  136. struct hlist_head *res[XFRM_POL_CAND_MAX];
  137. };
  138. static DEFINE_SPINLOCK(xfrm_if_cb_lock);
  139. static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
  140. static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
  141. static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
  142. __read_mostly;
  143. static struct kmem_cache *xfrm_dst_cache __ro_after_init;
  144. static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
  145. static struct rhashtable xfrm_policy_inexact_table;
  146. static const struct rhashtable_params xfrm_pol_inexact_params;
  147. static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
  148. static int stale_bundle(struct dst_entry *dst);
  149. static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  150. static void xfrm_policy_queue_process(struct timer_list *t);
  151. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
  152. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  153. int dir);
  154. static struct xfrm_pol_inexact_bin *
  155. xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
  156. u32 if_id);
  157. static struct xfrm_pol_inexact_bin *
  158. xfrm_policy_inexact_lookup_rcu(struct net *net,
  159. u8 type, u16 family, u8 dir, u32 if_id);
  160. static struct xfrm_policy *
  161. xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
  162. bool excl);
  163. static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
  164. struct xfrm_policy *policy);
  165. static bool
  166. xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
  167. struct xfrm_pol_inexact_bin *b,
  168. const xfrm_address_t *saddr,
  169. const xfrm_address_t *daddr);
  170. static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
  171. {
  172. return refcount_inc_not_zero(&policy->refcnt);
  173. }
  174. static inline bool
  175. __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  176. {
  177. const struct flowi4 *fl4 = &fl->u.ip4;
  178. return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
  179. addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
  180. !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
  181. !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
  182. (fl4->flowi4_proto == sel->proto || !sel->proto) &&
  183. (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
  184. }
  185. static inline bool
  186. __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
  187. {
  188. const struct flowi6 *fl6 = &fl->u.ip6;
  189. return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
  190. addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
  191. !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
  192. !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
  193. (fl6->flowi6_proto == sel->proto || !sel->proto) &&
  194. (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
  195. }
  196. bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
  197. unsigned short family)
  198. {
  199. switch (family) {
  200. case AF_INET:
  201. return __xfrm4_selector_match(sel, fl);
  202. case AF_INET6:
  203. return __xfrm6_selector_match(sel, fl);
  204. }
  205. return false;
  206. }
  207. static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
  208. {
  209. const struct xfrm_policy_afinfo *afinfo;
  210. if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
  211. return NULL;
  212. rcu_read_lock();
  213. afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
  214. if (unlikely(!afinfo))
  215. rcu_read_unlock();
  216. return afinfo;
  217. }
  218. /* Called with rcu_read_lock(). */
  219. static const struct xfrm_if_cb *xfrm_if_get_cb(void)
  220. {
  221. return rcu_dereference(xfrm_if_cb);
  222. }
  223. struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
  224. const xfrm_address_t *saddr,
  225. const xfrm_address_t *daddr,
  226. int family, u32 mark)
  227. {
  228. const struct xfrm_policy_afinfo *afinfo;
  229. struct dst_entry *dst;
  230. afinfo = xfrm_policy_get_afinfo(family);
  231. if (unlikely(afinfo == NULL))
  232. return ERR_PTR(-EAFNOSUPPORT);
  233. dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
  234. rcu_read_unlock();
  235. return dst;
  236. }
  237. EXPORT_SYMBOL(__xfrm_dst_lookup);
  238. static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
  239. int tos, int oif,
  240. xfrm_address_t *prev_saddr,
  241. xfrm_address_t *prev_daddr,
  242. int family, u32 mark)
  243. {
  244. struct net *net = xs_net(x);
  245. xfrm_address_t *saddr = &x->props.saddr;
  246. xfrm_address_t *daddr = &x->id.daddr;
  247. struct dst_entry *dst;
  248. if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
  249. saddr = x->coaddr;
  250. daddr = prev_daddr;
  251. }
  252. if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
  253. saddr = prev_saddr;
  254. daddr = x->coaddr;
  255. }
  256. dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
  257. if (!IS_ERR(dst)) {
  258. if (prev_saddr != saddr)
  259. memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
  260. if (prev_daddr != daddr)
  261. memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
  262. }
  263. return dst;
  264. }
  265. static inline unsigned long make_jiffies(long secs)
  266. {
  267. if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
  268. return MAX_SCHEDULE_TIMEOUT-1;
  269. else
  270. return secs*HZ;
  271. }
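/* Example: with HZ == 250, make_jiffies(60) yields 15000 jiffies; any secs
 * value at or above (MAX_SCHEDULE_TIMEOUT - 1) / HZ is clamped so that
 * secs * HZ cannot overflow the timer arithmetic.
 */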
  272. static void xfrm_policy_timer(struct timer_list *t)
  273. {
  274. struct xfrm_policy *xp = from_timer(xp, t, timer);
  275. time64_t now = ktime_get_real_seconds();
  276. time64_t next = TIME64_MAX;
  277. int warn = 0;
  278. int dir;
  279. read_lock(&xp->lock);
  280. if (unlikely(xp->walk.dead))
  281. goto out;
  282. dir = xfrm_policy_id2dir(xp->index);
  283. if (xp->lft.hard_add_expires_seconds) {
  284. time64_t tmo = xp->lft.hard_add_expires_seconds +
  285. xp->curlft.add_time - now;
  286. if (tmo <= 0)
  287. goto expired;
  288. if (tmo < next)
  289. next = tmo;
  290. }
  291. if (xp->lft.hard_use_expires_seconds) {
  292. time64_t tmo = xp->lft.hard_use_expires_seconds +
  293. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  294. if (tmo <= 0)
  295. goto expired;
  296. if (tmo < next)
  297. next = tmo;
  298. }
  299. if (xp->lft.soft_add_expires_seconds) {
  300. time64_t tmo = xp->lft.soft_add_expires_seconds +
  301. xp->curlft.add_time - now;
  302. if (tmo <= 0) {
  303. warn = 1;
  304. tmo = XFRM_KM_TIMEOUT;
  305. }
  306. if (tmo < next)
  307. next = tmo;
  308. }
  309. if (xp->lft.soft_use_expires_seconds) {
  310. time64_t tmo = xp->lft.soft_use_expires_seconds +
  311. (xp->curlft.use_time ? : xp->curlft.add_time) - now;
  312. if (tmo <= 0) {
  313. warn = 1;
  314. tmo = XFRM_KM_TIMEOUT;
  315. }
  316. if (tmo < next)
  317. next = tmo;
  318. }
  319. if (warn)
  320. km_policy_expired(xp, dir, 0, 0);
  321. if (next != TIME64_MAX &&
  322. !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
  323. xfrm_pol_hold(xp);
  324. out:
  325. read_unlock(&xp->lock);
  326. xfrm_pol_put(xp);
  327. return;
  328. expired:
  329. read_unlock(&xp->lock);
  330. if (!xfrm_policy_delete(xp, dir))
  331. km_policy_expired(xp, dir, 1, 0);
  332. xfrm_pol_put(xp);
  333. }
  334. /* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
  335. * SPD calls.
  336. */
  337. struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
  338. {
  339. struct xfrm_policy *policy;
  340. policy = kzalloc(sizeof(struct xfrm_policy), gfp);
  341. if (policy) {
  342. write_pnet(&policy->xp_net, net);
  343. INIT_LIST_HEAD(&policy->walk.all);
  344. INIT_HLIST_NODE(&policy->bydst_inexact_list);
  345. INIT_HLIST_NODE(&policy->bydst);
  346. INIT_HLIST_NODE(&policy->byidx);
  347. rwlock_init(&policy->lock);
  348. refcount_set(&policy->refcnt, 1);
  349. skb_queue_head_init(&policy->polq.hold_queue);
  350. timer_setup(&policy->timer, xfrm_policy_timer, 0);
  351. timer_setup(&policy->polq.hold_timer,
  352. xfrm_policy_queue_process, 0);
  353. }
  354. return policy;
  355. }
  356. EXPORT_SYMBOL(xfrm_policy_alloc);
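/* A minimal sketch (not part of the original file) of how a key manager such
 * as xfrm_user or af_key is expected to use xfrm_policy_alloc(): allocate,
 * fill in the selector, action and lifetimes, then hand the policy to
 * xfrm_policy_insert().  The helper name is made up for illustration and the
 * error handling is reduced to the bare minimum, hence the #if 0.
 */
#if 0
static int example_insert_output_policy(struct net *net,
					const struct xfrm_selector *sel)
{
	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);

	if (!pol)
		return -ENOMEM;

	pol->selector = *sel;
	pol->family = sel->family;
	pol->action = XFRM_POLICY_ALLOW;
	pol->lft.soft_byte_limit = XFRM_INF;
	pol->lft.hard_byte_limit = XFRM_INF;
	pol->lft.soft_packet_limit = XFRM_INF;
	pol->lft.hard_packet_limit = XFRM_INF;

	/* on success the SPD keeps the reference allocated above */
	return xfrm_policy_insert(XFRM_POLICY_OUT, pol, 1);
}
#endif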
  357. static void xfrm_policy_destroy_rcu(struct rcu_head *head)
  358. {
  359. struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
  360. security_xfrm_policy_free(policy->security);
  361. kfree(policy);
  362. }
  363. /* Destroy xfrm_policy: descendant resources must have been released by this point. */
  364. void xfrm_policy_destroy(struct xfrm_policy *policy)
  365. {
  366. BUG_ON(!policy->walk.dead);
  367. if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
  368. BUG();
  369. call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
  370. }
  371. EXPORT_SYMBOL(xfrm_policy_destroy);
  372. /* Rule must be locked. Release descendant resources, announce
  373. * entry dead. The rule must already be unlinked from the lists at this point.
  374. */
  375. static void xfrm_policy_kill(struct xfrm_policy *policy)
  376. {
  377. write_lock_bh(&policy->lock);
  378. policy->walk.dead = 1;
  379. write_unlock_bh(&policy->lock);
  380. atomic_inc(&policy->genid);
  381. if (del_timer(&policy->polq.hold_timer))
  382. xfrm_pol_put(policy);
  383. skb_queue_purge(&policy->polq.hold_queue);
  384. if (del_timer(&policy->timer))
  385. xfrm_pol_put(policy);
  386. xfrm_pol_put(policy);
  387. }
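/* Reference-counting sketch for xfrm_policy_kill() above: every armed timer
 * holds a reference taken when it was started, so each del_timer() that
 * returns nonzero is paired with an xfrm_pol_put(); the final xfrm_pol_put()
 * releases the reference the policy database held while the entry was
 * linked.
 */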
  388. static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
  389. static inline unsigned int idx_hash(struct net *net, u32 index)
  390. {
  391. return __idx_hash(index, net->xfrm.policy_idx_hmask);
  392. }
  393. /* calculate policy hash thresholds */
  394. static void __get_hash_thresh(struct net *net,
  395. unsigned short family, int dir,
  396. u8 *dbits, u8 *sbits)
  397. {
  398. switch (family) {
  399. case AF_INET:
  400. *dbits = net->xfrm.policy_bydst[dir].dbits4;
  401. *sbits = net->xfrm.policy_bydst[dir].sbits4;
  402. break;
  403. case AF_INET6:
  404. *dbits = net->xfrm.policy_bydst[dir].dbits6;
  405. *sbits = net->xfrm.policy_bydst[dir].sbits6;
  406. break;
  407. default:
  408. *dbits = 0;
  409. *sbits = 0;
  410. }
  411. }
  412. static struct hlist_head *policy_hash_bysel(struct net *net,
  413. const struct xfrm_selector *sel,
  414. unsigned short family, int dir)
  415. {
  416. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  417. unsigned int hash;
  418. u8 dbits;
  419. u8 sbits;
  420. __get_hash_thresh(net, family, dir, &dbits, &sbits);
  421. hash = __sel_hash(sel, family, hmask, dbits, sbits);
  422. if (hash == hmask + 1)
  423. return NULL;
  424. return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
  425. lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
  426. }
  427. static struct hlist_head *policy_hash_direct(struct net *net,
  428. const xfrm_address_t *daddr,
  429. const xfrm_address_t *saddr,
  430. unsigned short family, int dir)
  431. {
  432. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  433. unsigned int hash;
  434. u8 dbits;
  435. u8 sbits;
  436. __get_hash_thresh(net, family, dir, &dbits, &sbits);
  437. hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
  438. return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
  439. lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
  440. }
  441. static void xfrm_dst_hash_transfer(struct net *net,
  442. struct hlist_head *list,
  443. struct hlist_head *ndsttable,
  444. unsigned int nhashmask,
  445. int dir)
  446. {
  447. struct hlist_node *tmp, *entry0 = NULL;
  448. struct xfrm_policy *pol;
  449. unsigned int h0 = 0;
  450. u8 dbits;
  451. u8 sbits;
  452. redo:
  453. hlist_for_each_entry_safe(pol, tmp, list, bydst) {
  454. unsigned int h;
  455. __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
  456. h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
  457. pol->family, nhashmask, dbits, sbits);
  458. if (!entry0) {
  459. hlist_del_rcu(&pol->bydst);
  460. hlist_add_head_rcu(&pol->bydst, ndsttable + h);
  461. h0 = h;
  462. } else {
  463. if (h != h0)
  464. continue;
  465. hlist_del_rcu(&pol->bydst);
  466. hlist_add_behind_rcu(&pol->bydst, entry0);
  467. }
  468. entry0 = &pol->bydst;
  469. }
  470. if (!hlist_empty(list)) {
  471. entry0 = NULL;
  472. goto redo;
  473. }
  474. }
  475. static void xfrm_idx_hash_transfer(struct hlist_head *list,
  476. struct hlist_head *nidxtable,
  477. unsigned int nhashmask)
  478. {
  479. struct hlist_node *tmp;
  480. struct xfrm_policy *pol;
  481. hlist_for_each_entry_safe(pol, tmp, list, byidx) {
  482. unsigned int h;
  483. h = __idx_hash(pol->index, nhashmask);
  484. hlist_add_head(&pol->byidx, nidxtable+h);
  485. }
  486. }
  487. static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
  488. {
  489. return ((old_hmask + 1) << 1) - 1;
  490. }
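/* Example: an old hmask of 15 (16 buckets) becomes 31 (32 buckets), i.e. the
 * hash table doubles in size on every resize.
 */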
  491. static void xfrm_bydst_resize(struct net *net, int dir)
  492. {
  493. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  494. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  495. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  496. struct hlist_head *ndst = xfrm_hash_alloc(nsize);
  497. struct hlist_head *odst;
  498. int i;
  499. if (!ndst)
  500. return;
  501. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  502. write_seqcount_begin(&xfrm_policy_hash_generation);
  503. odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
  504. lockdep_is_held(&net->xfrm.xfrm_policy_lock));
  505. for (i = hmask; i >= 0; i--)
  506. xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
  507. rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
  508. net->xfrm.policy_bydst[dir].hmask = nhashmask;
  509. write_seqcount_end(&xfrm_policy_hash_generation);
  510. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  511. synchronize_rcu();
  512. xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
  513. }
  514. static void xfrm_byidx_resize(struct net *net, int total)
  515. {
  516. unsigned int hmask = net->xfrm.policy_idx_hmask;
  517. unsigned int nhashmask = xfrm_new_hash_mask(hmask);
  518. unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
  519. struct hlist_head *oidx = net->xfrm.policy_byidx;
  520. struct hlist_head *nidx = xfrm_hash_alloc(nsize);
  521. int i;
  522. if (!nidx)
  523. return;
  524. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  525. for (i = hmask; i >= 0; i--)
  526. xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
  527. net->xfrm.policy_byidx = nidx;
  528. net->xfrm.policy_idx_hmask = nhashmask;
  529. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  530. xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
  531. }
  532. static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
  533. {
  534. unsigned int cnt = net->xfrm.policy_count[dir];
  535. unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
  536. if (total)
  537. *total += cnt;
  538. if ((hmask + 1) < xfrm_policy_hashmax &&
  539. cnt > hmask)
  540. return 1;
  541. return 0;
  542. }
  543. static inline int xfrm_byidx_should_resize(struct net *net, int total)
  544. {
  545. unsigned int hmask = net->xfrm.policy_idx_hmask;
  546. if ((hmask + 1) < xfrm_policy_hashmax &&
  547. total > hmask)
  548. return 1;
  549. return 0;
  550. }
  551. void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
  552. {
  553. si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
  554. si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
  555. si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
  556. si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
  557. si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
  558. si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
  559. si->spdhcnt = net->xfrm.policy_idx_hmask;
  560. si->spdhmcnt = xfrm_policy_hashmax;
  561. }
  562. EXPORT_SYMBOL(xfrm_spd_getinfo);
  563. static DEFINE_MUTEX(hash_resize_mutex);
  564. static void xfrm_hash_resize(struct work_struct *work)
  565. {
  566. struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
  567. int dir, total;
  568. mutex_lock(&hash_resize_mutex);
  569. total = 0;
  570. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  571. if (xfrm_bydst_should_resize(net, dir, &total))
  572. xfrm_bydst_resize(net, dir);
  573. }
  574. if (xfrm_byidx_should_resize(net, total))
  575. xfrm_byidx_resize(net, total);
  576. mutex_unlock(&hash_resize_mutex);
  577. }
  578. /* Make sure *pol can be inserted into fastbin.
  579. * Useful to check that later insert requests will be successful
  580. * (provided xfrm_policy_lock is held throughout).
  581. */
  582. static struct xfrm_pol_inexact_bin *
  583. xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
  584. {
  585. struct xfrm_pol_inexact_bin *bin, *prev;
  586. struct xfrm_pol_inexact_key k = {
  587. .family = pol->family,
  588. .type = pol->type,
  589. .dir = dir,
  590. .if_id = pol->if_id,
  591. };
  592. struct net *net = xp_net(pol);
  593. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  594. write_pnet(&k.net, net);
  595. bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
  596. xfrm_pol_inexact_params);
  597. if (bin)
  598. return bin;
  599. bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
  600. if (!bin)
  601. return NULL;
  602. bin->k = k;
  603. INIT_HLIST_HEAD(&bin->hhead);
  604. bin->root_d = RB_ROOT;
  605. bin->root_s = RB_ROOT;
  606. seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
  607. prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
  608. &bin->k, &bin->head,
  609. xfrm_pol_inexact_params);
  610. if (!prev) {
  611. list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
  612. return bin;
  613. }
  614. kfree(bin);
  615. return IS_ERR(prev) ? NULL : prev;
  616. }
  617. static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
  618. int family, u8 prefixlen)
  619. {
  620. if (xfrm_addr_any(addr, family))
  621. return true;
  622. if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
  623. return true;
  624. if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
  625. return true;
  626. return false;
  627. }
  628. static bool
  629. xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
  630. {
  631. const xfrm_address_t *addr;
  632. bool saddr_any, daddr_any;
  633. u8 prefixlen;
  634. addr = &policy->selector.saddr;
  635. prefixlen = policy->selector.prefixlen_s;
  636. saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
  637. policy->family,
  638. prefixlen);
  639. addr = &policy->selector.daddr;
  640. prefixlen = policy->selector.prefixlen_d;
  641. daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
  642. policy->family,
  643. prefixlen);
  644. return saddr_any && daddr_any;
  645. }
  646. static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
  647. const xfrm_address_t *addr, u8 prefixlen)
  648. {
  649. node->addr = *addr;
  650. node->prefixlen = prefixlen;
  651. }
  652. static struct xfrm_pol_inexact_node *
  653. xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
  654. {
  655. struct xfrm_pol_inexact_node *node;
  656. node = kzalloc(sizeof(*node), GFP_ATOMIC);
  657. if (node)
  658. xfrm_pol_inexact_node_init(node, addr, prefixlen);
  659. return node;
  660. }
  661. static int xfrm_policy_addr_delta(const xfrm_address_t *a,
  662. const xfrm_address_t *b,
  663. u8 prefixlen, u16 family)
  664. {
  665. u32 ma, mb, mask;
  666. unsigned int pdw, pbi;
  667. int delta = 0;
  668. switch (family) {
  669. case AF_INET:
  670. if (prefixlen == 0)
  671. return 0;
  672. mask = ~0U << (32 - prefixlen);
  673. ma = ntohl(a->a4) & mask;
  674. mb = ntohl(b->a4) & mask;
  675. if (ma < mb)
  676. delta = -1;
  677. else if (ma > mb)
  678. delta = 1;
  679. break;
  680. case AF_INET6:
  681. pdw = prefixlen >> 5;
  682. pbi = prefixlen & 0x1f;
  683. if (pdw) {
  684. delta = memcmp(a->a6, b->a6, pdw << 2);
  685. if (delta)
  686. return delta;
  687. }
  688. if (pbi) {
  689. mask = ~0U << (32 - pbi);
  690. ma = ntohl(a->a6[pdw]) & mask;
  691. mb = ntohl(b->a6[pdw]) & mask;
  692. if (ma < mb)
  693. delta = -1;
  694. else if (ma > mb)
  695. delta = 1;
  696. }
  697. break;
  698. default:
  699. break;
  700. }
  701. return delta;
  702. }
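/* Worked example (IPv4, prefixlen 24): 10.1.2.3 vs 10.1.2.200 returns 0
 * because both mask to 10.1.2.0, while 10.1.1.0 vs 10.1.2.0 returns -1
 * because the first masked address is numerically smaller.
 */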
  703. static void xfrm_policy_inexact_list_reinsert(struct net *net,
  704. struct xfrm_pol_inexact_node *n,
  705. u16 family)
  706. {
  707. unsigned int matched_s, matched_d;
  708. struct xfrm_policy *policy, *p;
  709. matched_s = 0;
  710. matched_d = 0;
  711. list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
  712. struct hlist_node *newpos = NULL;
  713. bool matches_s, matches_d;
  714. if (!policy->bydst_reinsert)
  715. continue;
  716. WARN_ON_ONCE(policy->family != family);
  717. policy->bydst_reinsert = false;
  718. hlist_for_each_entry(p, &n->hhead, bydst) {
  719. if (policy->priority > p->priority)
  720. newpos = &p->bydst;
  721. else if (policy->priority == p->priority &&
  722. policy->pos > p->pos)
  723. newpos = &p->bydst;
  724. else
  725. break;
  726. }
  727. if (newpos)
  728. hlist_add_behind_rcu(&policy->bydst, newpos);
  729. else
  730. hlist_add_head_rcu(&policy->bydst, &n->hhead);
  731. /* paranoia checks follow.
  732. * Check that the reinserted policy matches at least
  733. * saddr or daddr for current node prefix.
  734. *
  735. * Matching both is fine, matching saddr in one policy
  736. * (but not daddr) and then matching only daddr in another
  737. * is a bug.
  738. */
  739. matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
  740. &n->addr,
  741. n->prefixlen,
  742. family) == 0;
  743. matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
  744. &n->addr,
  745. n->prefixlen,
  746. family) == 0;
  747. if (matches_s && matches_d)
  748. continue;
  749. WARN_ON_ONCE(!matches_s && !matches_d);
  750. if (matches_s)
  751. matched_s++;
  752. if (matches_d)
  753. matched_d++;
  754. WARN_ON_ONCE(matched_s && matched_d);
  755. }
  756. }
  757. static void xfrm_policy_inexact_node_reinsert(struct net *net,
  758. struct xfrm_pol_inexact_node *n,
  759. struct rb_root *new,
  760. u16 family)
  761. {
  762. struct xfrm_pol_inexact_node *node;
  763. struct rb_node **p, *parent;
  764. /* we should not have another subtree here */
  765. WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
  766. restart:
  767. parent = NULL;
  768. p = &new->rb_node;
  769. while (*p) {
  770. u8 prefixlen;
  771. int delta;
  772. parent = *p;
  773. node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
  774. prefixlen = min(node->prefixlen, n->prefixlen);
  775. delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
  776. prefixlen, family);
  777. if (delta < 0) {
  778. p = &parent->rb_left;
  779. } else if (delta > 0) {
  780. p = &parent->rb_right;
  781. } else {
  782. bool same_prefixlen = node->prefixlen == n->prefixlen;
  783. struct xfrm_policy *tmp;
  784. hlist_for_each_entry(tmp, &n->hhead, bydst) {
  785. tmp->bydst_reinsert = true;
  786. hlist_del_rcu(&tmp->bydst);
  787. }
  788. node->prefixlen = prefixlen;
  789. xfrm_policy_inexact_list_reinsert(net, node, family);
  790. if (same_prefixlen) {
  791. kfree_rcu(n, rcu);
  792. return;
  793. }
  794. rb_erase(*p, new);
  795. kfree_rcu(n, rcu);
  796. n = node;
  797. goto restart;
  798. }
  799. }
  800. rb_link_node_rcu(&n->node, parent, p);
  801. rb_insert_color(&n->node, new);
  802. }
  803. /* merge nodes v and n */
  804. static void xfrm_policy_inexact_node_merge(struct net *net,
  805. struct xfrm_pol_inexact_node *v,
  806. struct xfrm_pol_inexact_node *n,
  807. u16 family)
  808. {
  809. struct xfrm_pol_inexact_node *node;
  810. struct xfrm_policy *tmp;
  811. struct rb_node *rnode;
  812. /* To-be-merged node v has a subtree.
  813. *
  814. * Dismantle it and insert its nodes to n->root.
  815. */
  816. while ((rnode = rb_first(&v->root)) != NULL) {
  817. node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
  818. rb_erase(&node->node, &v->root);
  819. xfrm_policy_inexact_node_reinsert(net, node, &n->root,
  820. family);
  821. }
  822. hlist_for_each_entry(tmp, &v->hhead, bydst) {
  823. tmp->bydst_reinsert = true;
  824. hlist_del_rcu(&tmp->bydst);
  825. }
  826. xfrm_policy_inexact_list_reinsert(net, n, family);
  827. }
  828. static struct xfrm_pol_inexact_node *
  829. xfrm_policy_inexact_insert_node(struct net *net,
  830. struct rb_root *root,
  831. xfrm_address_t *addr,
  832. u16 family, u8 prefixlen, u8 dir)
  833. {
  834. struct xfrm_pol_inexact_node *cached = NULL;
  835. struct rb_node **p, *parent = NULL;
  836. struct xfrm_pol_inexact_node *node;
  837. p = &root->rb_node;
  838. while (*p) {
  839. int delta;
  840. parent = *p;
  841. node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
  842. delta = xfrm_policy_addr_delta(addr, &node->addr,
  843. node->prefixlen,
  844. family);
  845. if (delta == 0 && prefixlen >= node->prefixlen) {
  846. WARN_ON_ONCE(cached); /* ipsec policies got lost */
  847. return node;
  848. }
  849. if (delta < 0)
  850. p = &parent->rb_left;
  851. else
  852. p = &parent->rb_right;
  853. if (prefixlen < node->prefixlen) {
  854. delta = xfrm_policy_addr_delta(addr, &node->addr,
  855. prefixlen,
  856. family);
  857. if (delta)
  858. continue;
  859. /* This node is a subnet of the new prefix. It needs
  860. * to be removed and re-inserted with the smaller
  861. * prefix and all nodes that are now also covered
  862. * by the reduced prefixlen.
  863. */
  864. rb_erase(&node->node, root);
  865. if (!cached) {
  866. xfrm_pol_inexact_node_init(node, addr,
  867. prefixlen);
  868. cached = node;
  869. } else {
  870. /* This node also falls within the new
  871. * prefixlen. Merge the to-be-reinserted
  872. * node and this one.
  873. */
  874. xfrm_policy_inexact_node_merge(net, node,
  875. cached, family);
  876. kfree_rcu(node, rcu);
  877. }
  878. /* restart */
  879. p = &root->rb_node;
  880. parent = NULL;
  881. }
  882. }
  883. node = cached;
  884. if (!node) {
  885. node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
  886. if (!node)
  887. return NULL;
  888. }
  889. rb_link_node_rcu(&node->node, parent, p);
  890. rb_insert_color(&node->node, root);
  891. return node;
  892. }
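/* Example of the re-keying above (hypothetical prefixes): if the tree holds
 * a 10.1.2.0/24 node and a policy for 10.1.0.0/16 is inserted, the /24 node
 * is taken out of the tree, re-initialised as the 10.1.0.0/16 node (keeping
 * its policy list) and the scan restarts; any other node that now also falls
 * inside 10.1.0.0/16 is merged into it, with its policies flagged for
 * re-insertion.
 */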
  893. static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
  894. {
  895. struct xfrm_pol_inexact_node *node;
  896. struct rb_node *rn = rb_first(r);
  897. while (rn) {
  898. node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
  899. xfrm_policy_inexact_gc_tree(&node->root, rm);
  900. rn = rb_next(rn);
  901. if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
  902. WARN_ON_ONCE(rm);
  903. continue;
  904. }
  905. rb_erase(&node->node, r);
  906. kfree_rcu(node, rcu);
  907. }
  908. }
  909. static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
  910. {
  911. write_seqcount_begin(&b->count);
  912. xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
  913. xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
  914. write_seqcount_end(&b->count);
  915. if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
  916. !hlist_empty(&b->hhead)) {
  917. WARN_ON_ONCE(net_exit);
  918. return;
  919. }
  920. if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
  921. xfrm_pol_inexact_params) == 0) {
  922. list_del(&b->inexact_bins);
  923. kfree_rcu(b, rcu);
  924. }
  925. }
  926. static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
  927. {
  928. struct net *net = read_pnet(&b->k.net);
  929. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  930. __xfrm_policy_inexact_prune_bin(b, false);
  931. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  932. }
  933. static void __xfrm_policy_inexact_flush(struct net *net)
  934. {
  935. struct xfrm_pol_inexact_bin *bin, *t;
  936. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  937. list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
  938. __xfrm_policy_inexact_prune_bin(bin, false);
  939. }
  940. static struct hlist_head *
  941. xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
  942. struct xfrm_policy *policy, u8 dir)
  943. {
  944. struct xfrm_pol_inexact_node *n;
  945. struct net *net;
  946. net = xp_net(policy);
  947. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  948. if (xfrm_policy_inexact_insert_use_any_list(policy))
  949. return &bin->hhead;
  950. if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
  951. policy->family,
  952. policy->selector.prefixlen_d)) {
  953. write_seqcount_begin(&bin->count);
  954. n = xfrm_policy_inexact_insert_node(net,
  955. &bin->root_s,
  956. &policy->selector.saddr,
  957. policy->family,
  958. policy->selector.prefixlen_s,
  959. dir);
  960. write_seqcount_end(&bin->count);
  961. if (!n)
  962. return NULL;
  963. return &n->hhead;
  964. }
  965. /* daddr is fixed */
  966. write_seqcount_begin(&bin->count);
  967. n = xfrm_policy_inexact_insert_node(net,
  968. &bin->root_d,
  969. &policy->selector.daddr,
  970. policy->family,
  971. policy->selector.prefixlen_d, dir);
  972. write_seqcount_end(&bin->count);
  973. if (!n)
  974. return NULL;
  975. /* saddr is wildcard */
  976. if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
  977. policy->family,
  978. policy->selector.prefixlen_s))
  979. return &n->hhead;
  980. write_seqcount_begin(&bin->count);
  981. n = xfrm_policy_inexact_insert_node(net,
  982. &n->root,
  983. &policy->selector.saddr,
  984. policy->family,
  985. policy->selector.prefixlen_s, dir);
  986. write_seqcount_end(&bin->count);
  987. if (!n)
  988. return NULL;
  989. return &n->hhead;
  990. }
  991. static struct xfrm_policy *
  992. xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
  993. {
  994. struct xfrm_pol_inexact_bin *bin;
  995. struct xfrm_policy *delpol;
  996. struct hlist_head *chain;
  997. struct net *net;
  998. bin = xfrm_policy_inexact_alloc_bin(policy, dir);
  999. if (!bin)
  1000. return ERR_PTR(-ENOMEM);
  1001. net = xp_net(policy);
  1002. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  1003. chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
  1004. if (!chain) {
  1005. __xfrm_policy_inexact_prune_bin(bin, false);
  1006. return ERR_PTR(-ENOMEM);
  1007. }
  1008. delpol = xfrm_policy_insert_list(chain, policy, excl);
  1009. if (delpol && excl) {
  1010. __xfrm_policy_inexact_prune_bin(bin, false);
  1011. return ERR_PTR(-EEXIST);
  1012. }
  1013. chain = &net->xfrm.policy_inexact[dir];
  1014. xfrm_policy_insert_inexact_list(chain, policy);
  1015. if (delpol)
  1016. __xfrm_policy_inexact_prune_bin(bin, false);
  1017. return delpol;
  1018. }
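/* xfrm_hash_rebuild() below re-reads the prefix-length thresholds, first
 * makes sure every affected inexact policy can be given a bin and chain
 * again, then empties the bydst hash tables and per-direction inexact lists,
 * and finally re-inserts all policies in creation order so the relative
 * ordering within each priority is preserved.
 */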
  1019. static void xfrm_hash_rebuild(struct work_struct *work)
  1020. {
  1021. struct net *net = container_of(work, struct net,
  1022. xfrm.policy_hthresh.work);
  1023. unsigned int hmask;
  1024. struct xfrm_policy *pol;
  1025. struct xfrm_policy *policy;
  1026. struct hlist_head *chain;
  1027. struct hlist_head *odst;
  1028. struct hlist_node *newpos;
  1029. int i;
  1030. int dir;
  1031. unsigned seq;
  1032. u8 lbits4, rbits4, lbits6, rbits6;
  1033. mutex_lock(&hash_resize_mutex);
  1034. /* read selector prefixlen thresholds */
  1035. do {
  1036. seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
  1037. lbits4 = net->xfrm.policy_hthresh.lbits4;
  1038. rbits4 = net->xfrm.policy_hthresh.rbits4;
  1039. lbits6 = net->xfrm.policy_hthresh.lbits6;
  1040. rbits6 = net->xfrm.policy_hthresh.rbits6;
  1041. } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
  1042. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1043. write_seqcount_begin(&xfrm_policy_hash_generation);
  1044. /* make sure that we can insert the indirect policies again before
  1045. * we start with destructive action.
  1046. */
  1047. list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
  1048. struct xfrm_pol_inexact_bin *bin;
  1049. u8 dbits, sbits;
  1050. dir = xfrm_policy_id2dir(policy->index);
  1051. if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
  1052. continue;
  1053. if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
  1054. if (policy->family == AF_INET) {
  1055. dbits = rbits4;
  1056. sbits = lbits4;
  1057. } else {
  1058. dbits = rbits6;
  1059. sbits = lbits6;
  1060. }
  1061. } else {
  1062. if (policy->family == AF_INET) {
  1063. dbits = lbits4;
  1064. sbits = rbits4;
  1065. } else {
  1066. dbits = lbits6;
  1067. sbits = rbits6;
  1068. }
  1069. }
  1070. if (policy->selector.prefixlen_d < dbits ||
  1071. policy->selector.prefixlen_s < sbits)
  1072. continue;
  1073. bin = xfrm_policy_inexact_alloc_bin(policy, dir);
  1074. if (!bin)
  1075. goto out_unlock;
  1076. if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
  1077. goto out_unlock;
  1078. }
  1079. /* reset the bydst and inexact table in all directions */
  1080. for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
  1081. struct hlist_node *n;
  1082. hlist_for_each_entry_safe(policy, n,
  1083. &net->xfrm.policy_inexact[dir],
  1084. bydst_inexact_list) {
  1085. hlist_del_rcu(&policy->bydst);
  1086. hlist_del_init(&policy->bydst_inexact_list);
  1087. }
  1088. hmask = net->xfrm.policy_bydst[dir].hmask;
  1089. odst = net->xfrm.policy_bydst[dir].table;
  1090. for (i = hmask; i >= 0; i--) {
  1091. hlist_for_each_entry_safe(policy, n, odst + i, bydst)
  1092. hlist_del_rcu(&policy->bydst);
  1093. }
  1094. if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
  1095. /* dir out => dst = remote, src = local */
  1096. net->xfrm.policy_bydst[dir].dbits4 = rbits4;
  1097. net->xfrm.policy_bydst[dir].sbits4 = lbits4;
  1098. net->xfrm.policy_bydst[dir].dbits6 = rbits6;
  1099. net->xfrm.policy_bydst[dir].sbits6 = lbits6;
  1100. } else {
  1101. /* dir in/fwd => dst = local, src = remote */
  1102. net->xfrm.policy_bydst[dir].dbits4 = lbits4;
  1103. net->xfrm.policy_bydst[dir].sbits4 = rbits4;
  1104. net->xfrm.policy_bydst[dir].dbits6 = lbits6;
  1105. net->xfrm.policy_bydst[dir].sbits6 = rbits6;
  1106. }
  1107. }
  1108. /* re-insert all policies by order of creation */
  1109. list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
  1110. if (policy->walk.dead)
  1111. continue;
  1112. dir = xfrm_policy_id2dir(policy->index);
  1113. if (dir >= XFRM_POLICY_MAX) {
  1114. /* skip socket policies */
  1115. continue;
  1116. }
  1117. newpos = NULL;
  1118. chain = policy_hash_bysel(net, &policy->selector,
  1119. policy->family, dir);
  1120. if (!chain) {
  1121. void *p = xfrm_policy_inexact_insert(policy, dir, 0);
  1122. WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
  1123. continue;
  1124. }
  1125. hlist_for_each_entry(pol, chain, bydst) {
  1126. if (policy->priority >= pol->priority)
  1127. newpos = &pol->bydst;
  1128. else
  1129. break;
  1130. }
  1131. if (newpos)
  1132. hlist_add_behind_rcu(&policy->bydst, newpos);
  1133. else
  1134. hlist_add_head_rcu(&policy->bydst, chain);
  1135. }
  1136. out_unlock:
  1137. __xfrm_policy_inexact_flush(net);
  1138. write_seqcount_end(&xfrm_policy_hash_generation);
  1139. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1140. mutex_unlock(&hash_resize_mutex);
  1141. }
  1142. void xfrm_policy_hash_rebuild(struct net *net)
  1143. {
  1144. schedule_work(&net->xfrm.policy_hthresh.work);
  1145. }
  1146. EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
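/* Usage sketch, illustrative only: xfrm_policy_hash_rebuild() merely schedules
 * net->xfrm.policy_hthresh.work; the worker above then re-sorts policies
 * between the exact hash tables and the inexact lists according to the
 * current prefix-length thresholds.  A control-path caller would typically
 * update the thresholds first (field names assumed from
 * struct xfrm_policy_hthresh in net/xfrm.h):
 *
 *	write_seqlock(&net->xfrm.policy_hthresh.lock);
 *	net->xfrm.policy_hthresh.lbits4 = 24;
 *	net->xfrm.policy_hthresh.rbits4 = 24;
 *	write_sequnlock(&net->xfrm.policy_hthresh.lock);
 *	xfrm_policy_hash_rebuild(net);	-- runs the rebuild asynchronously
 */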
  1147. /* Generate new index... KAME seems to generate them ordered by cost,
  1148. * at the price of absolute unpredictability of rule ordering. That will not do here. */
  1149. static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
  1150. {
  1151. static u32 idx_generator;
  1152. for (;;) {
  1153. struct hlist_head *list;
  1154. struct xfrm_policy *p;
  1155. u32 idx;
  1156. int found;
  1157. if (!index) {
  1158. idx = (idx_generator | dir);
  1159. idx_generator += 8;
  1160. } else {
  1161. idx = index;
  1162. index = 0;
  1163. }
  1164. if (idx == 0)
  1165. idx = 8;
  1166. list = net->xfrm.policy_byidx + idx_hash(net, idx);
  1167. found = 0;
  1168. hlist_for_each_entry(p, list, byidx) {
  1169. if (p->index == idx) {
  1170. found = 1;
  1171. break;
  1172. }
  1173. }
  1174. if (!found)
  1175. return idx;
  1176. }
  1177. }
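/* Illustrative example, assuming xfrm_policy_id2dir() masks the low three
 * bits as in net/xfrm.h: xfrm_gen_index() keeps the direction in the low
 * three bits of the index (idx_generator advances in steps of 8), so the
 * direction can later be recovered from the index alone.
 *
 *	dir = XFRM_POLICY_OUT;			-- 1
 *	idx = xfrm_gen_index(net, dir, 0);	-- e.g. 16 | 1 == 17
 *	xfrm_policy_id2dir(idx);		-- 17 & 7 == 1 == XFRM_POLICY_OUT
 */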
  1178. static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
  1179. {
  1180. u32 *p1 = (u32 *) s1;
  1181. u32 *p2 = (u32 *) s2;
  1182. int len = sizeof(struct xfrm_selector) / sizeof(u32);
  1183. int i;
  1184. for (i = 0; i < len; i++) {
  1185. if (p1[i] != p2[i])
  1186. return 1;
  1187. }
  1188. return 0;
  1189. }
  1190. static void xfrm_policy_requeue(struct xfrm_policy *old,
  1191. struct xfrm_policy *new)
  1192. {
  1193. struct xfrm_policy_queue *pq = &old->polq;
  1194. struct sk_buff_head list;
  1195. if (skb_queue_empty(&pq->hold_queue))
  1196. return;
  1197. __skb_queue_head_init(&list);
  1198. spin_lock_bh(&pq->hold_queue.lock);
  1199. skb_queue_splice_init(&pq->hold_queue, &list);
  1200. if (del_timer(&pq->hold_timer))
  1201. xfrm_pol_put(old);
  1202. spin_unlock_bh(&pq->hold_queue.lock);
  1203. pq = &new->polq;
  1204. spin_lock_bh(&pq->hold_queue.lock);
  1205. skb_queue_splice(&list, &pq->hold_queue);
  1206. pq->timeout = XFRM_QUEUE_TMO_MIN;
  1207. if (!mod_timer(&pq->hold_timer, jiffies))
  1208. xfrm_pol_hold(new);
  1209. spin_unlock_bh(&pq->hold_queue.lock);
  1210. }
  1211. static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
  1212. struct xfrm_policy *pol)
  1213. {
  1214. return mark->v == pol->mark.v && mark->m == pol->mark.m;
  1215. }
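/* Illustrative example (values hypothetical): xfrm_policy_mark_match()
 * compares the (value, mask) pair literally and is used by the management
 * lookups below, where userspace must pass the same mark it used at
 * insertion time.  Flow matching is different: xfrm_policy_match() applies
 * the mask to the flow mark.
 *
 *	policy inserted with mark.v = 0x10, mark.m = 0xff:
 *	  lookup mark {0x10, 0xff}  -> match
 *	  lookup mark {0x10, 0x00}  -> no match (mask differs)
 *	  a flow with flowi_mark 0x3210 matches in xfrm_policy_match(),
 *	  because (0x3210 & 0xff) == 0x10
 */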
  1216. static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
  1217. {
  1218. const struct xfrm_pol_inexact_key *k = data;
  1219. u32 a = k->type << 24 | k->dir << 16 | k->family;
  1220. return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
  1221. seed);
  1222. }
  1223. static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
  1224. {
  1225. const struct xfrm_pol_inexact_bin *b = data;
  1226. return xfrm_pol_bin_key(&b->k, 0, seed);
  1227. }
  1228. static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
  1229. const void *ptr)
  1230. {
  1231. const struct xfrm_pol_inexact_key *key = arg->key;
  1232. const struct xfrm_pol_inexact_bin *b = ptr;
  1233. int ret;
  1234. if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
  1235. return -1;
  1236. ret = b->k.dir ^ key->dir;
  1237. if (ret)
  1238. return ret;
  1239. ret = b->k.type ^ key->type;
  1240. if (ret)
  1241. return ret;
  1242. ret = b->k.family ^ key->family;
  1243. if (ret)
  1244. return ret;
  1245. return b->k.if_id ^ key->if_id;
  1246. }
  1247. static const struct rhashtable_params xfrm_pol_inexact_params = {
  1248. .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
  1249. .hashfn = xfrm_pol_bin_key,
  1250. .obj_hashfn = xfrm_pol_bin_obj,
  1251. .obj_cmpfn = xfrm_pol_bin_cmp,
  1252. .automatic_shrinking = true,
  1253. };
  1254. static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
  1255. struct xfrm_policy *policy)
  1256. {
  1257. struct xfrm_policy *pol, *delpol = NULL;
  1258. struct hlist_node *newpos = NULL;
  1259. int i = 0;
  1260. hlist_for_each_entry(pol, chain, bydst_inexact_list) {
  1261. if (pol->type == policy->type &&
  1262. pol->if_id == policy->if_id &&
  1263. !selector_cmp(&pol->selector, &policy->selector) &&
  1264. xfrm_policy_mark_match(&policy->mark, pol) &&
  1265. xfrm_sec_ctx_match(pol->security, policy->security) &&
  1266. !WARN_ON(delpol)) {
  1267. delpol = pol;
  1268. if (policy->priority > pol->priority)
  1269. continue;
  1270. } else if (policy->priority >= pol->priority) {
  1271. newpos = &pol->bydst_inexact_list;
  1272. continue;
  1273. }
  1274. if (delpol)
  1275. break;
  1276. }
  1277. if (newpos)
  1278. hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
  1279. else
  1280. hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
  1281. hlist_for_each_entry(pol, chain, bydst_inexact_list) {
  1282. pol->pos = i;
  1283. i++;
  1284. }
  1285. }
  1286. static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
  1287. struct xfrm_policy *policy,
  1288. bool excl)
  1289. {
  1290. struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
  1291. hlist_for_each_entry(pol, chain, bydst) {
  1292. if (pol->type == policy->type &&
  1293. pol->if_id == policy->if_id &&
  1294. !selector_cmp(&pol->selector, &policy->selector) &&
  1295. xfrm_policy_mark_match(&policy->mark, pol) &&
  1296. xfrm_sec_ctx_match(pol->security, policy->security) &&
  1297. !WARN_ON(delpol)) {
  1298. if (excl)
  1299. return ERR_PTR(-EEXIST);
  1300. delpol = pol;
  1301. if (policy->priority > pol->priority)
  1302. continue;
  1303. } else if (policy->priority >= pol->priority) {
  1304. newpos = pol;
  1305. continue;
  1306. }
  1307. if (delpol)
  1308. break;
  1309. }
  1310. if (newpos)
  1311. hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
  1312. else
  1313. hlist_add_head_rcu(&policy->bydst, chain);
  1314. return delpol;
  1315. }
  1316. int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
  1317. {
  1318. struct net *net = xp_net(policy);
  1319. struct xfrm_policy *delpol;
  1320. struct hlist_head *chain;
  1321. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1322. chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
  1323. if (chain)
  1324. delpol = xfrm_policy_insert_list(chain, policy, excl);
  1325. else
  1326. delpol = xfrm_policy_inexact_insert(policy, dir, excl);
  1327. if (IS_ERR(delpol)) {
  1328. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1329. return PTR_ERR(delpol);
  1330. }
  1331. __xfrm_policy_link(policy, dir);
  1332. /* After previous checking, family can either be AF_INET or AF_INET6 */
  1333. if (policy->family == AF_INET)
  1334. rt_genid_bump_ipv4(net);
  1335. else
  1336. rt_genid_bump_ipv6(net);
  1337. if (delpol) {
  1338. xfrm_policy_requeue(delpol, policy);
  1339. __xfrm_policy_unlink(delpol, dir);
  1340. }
  1341. policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
  1342. hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
  1343. policy->curlft.add_time = ktime_get_real_seconds();
  1344. policy->curlft.use_time = 0;
  1345. if (!mod_timer(&policy->timer, jiffies + HZ))
  1346. xfrm_pol_hold(policy);
  1347. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1348. if (delpol)
  1349. xfrm_policy_kill(delpol);
  1350. else if (xfrm_bydst_should_resize(net, dir, NULL))
  1351. schedule_work(&net->xfrm.policy_hash_work);
  1352. return 0;
  1353. }
  1354. EXPORT_SYMBOL(xfrm_policy_insert);
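/* Usage sketch, illustrative only; error handling trimmed and selector
 * values hypothetical.  The real callers are the netlink (xfrm_user) and
 * PF_KEY front ends.
 *
 *	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);
 *
 *	if (!pol)
 *		return -ENOMEM;
 *	pol->selector = sel;		-- prefix lengths pick exact vs. inexact storage
 *	pol->family   = AF_INET;
 *	pol->action   = XFRM_POLICY_ALLOW;
 *	pol->priority = 100;
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, 1);	-- excl: -EEXIST on duplicate
 */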
  1355. static struct xfrm_policy *
  1356. __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
  1357. u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
  1358. struct xfrm_sec_ctx *ctx)
  1359. {
  1360. struct xfrm_policy *pol;
  1361. if (!chain)
  1362. return NULL;
  1363. hlist_for_each_entry(pol, chain, bydst) {
  1364. if (pol->type == type &&
  1365. pol->if_id == if_id &&
  1366. xfrm_policy_mark_match(mark, pol) &&
  1367. !selector_cmp(sel, &pol->selector) &&
  1368. xfrm_sec_ctx_match(ctx, pol->security))
  1369. return pol;
  1370. }
  1371. return NULL;
  1372. }
  1373. struct xfrm_policy *
  1374. xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
  1375. u8 type, int dir, struct xfrm_selector *sel,
  1376. struct xfrm_sec_ctx *ctx, int delete, int *err)
  1377. {
  1378. struct xfrm_pol_inexact_bin *bin = NULL;
  1379. struct xfrm_policy *pol, *ret = NULL;
  1380. struct hlist_head *chain;
  1381. *err = 0;
  1382. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1383. chain = policy_hash_bysel(net, sel, sel->family, dir);
  1384. if (!chain) {
  1385. struct xfrm_pol_inexact_candidates cand;
  1386. int i;
  1387. bin = xfrm_policy_inexact_lookup(net, type,
  1388. sel->family, dir, if_id);
  1389. if (!bin) {
  1390. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1391. return NULL;
  1392. }
  1393. if (!xfrm_policy_find_inexact_candidates(&cand, bin,
  1394. &sel->saddr,
  1395. &sel->daddr)) {
  1396. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1397. return NULL;
  1398. }
  1399. pol = NULL;
  1400. for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
  1401. struct xfrm_policy *tmp;
  1402. tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
  1403. if_id, type, dir,
  1404. sel, ctx);
  1405. if (!tmp)
  1406. continue;
  1407. if (!pol || tmp->pos < pol->pos)
  1408. pol = tmp;
  1409. }
  1410. } else {
  1411. pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
  1412. sel, ctx);
  1413. }
  1414. if (pol) {
  1415. xfrm_pol_hold(pol);
  1416. if (delete) {
  1417. *err = security_xfrm_policy_delete(pol->security);
  1418. if (*err) {
  1419. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1420. return pol;
  1421. }
  1422. __xfrm_policy_unlink(pol, dir);
  1423. }
  1424. ret = pol;
  1425. }
  1426. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1427. if (ret && delete)
  1428. xfrm_policy_kill(ret);
  1429. if (bin && delete)
  1430. xfrm_policy_inexact_prune_bin(bin);
  1431. return ret;
  1432. }
  1433. EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
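/* Usage sketch, illustrative only; the mark and selector are hypothetical.
 * With delete != 0 the policy is unlinked and killed, but the reference
 * taken above is still handed to the caller:
 *
 *	struct xfrm_policy *pol;
 *	int err;
 *
 *	pol = xfrm_policy_bysel_ctx(net, &mark, 0, XFRM_POLICY_TYPE_MAIN,
 *				    XFRM_POLICY_OUT, &sel, NULL, 1, &err);
 *	if (pol)
 *		xfrm_pol_put(pol);	-- drop the reference returned to us
 */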
  1434. struct xfrm_policy *
  1435. xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
  1436. u8 type, int dir, u32 id, int delete, int *err)
  1437. {
  1438. struct xfrm_policy *pol, *ret;
  1439. struct hlist_head *chain;
  1440. *err = -ENOENT;
  1441. if (xfrm_policy_id2dir(id) != dir)
  1442. return NULL;
  1443. *err = 0;
  1444. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1445. chain = net->xfrm.policy_byidx + idx_hash(net, id);
  1446. ret = NULL;
  1447. hlist_for_each_entry(pol, chain, byidx) {
  1448. if (pol->type == type && pol->index == id &&
  1449. pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
  1450. xfrm_pol_hold(pol);
  1451. if (delete) {
  1452. *err = security_xfrm_policy_delete(
  1453. pol->security);
  1454. if (*err) {
  1455. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1456. return pol;
  1457. }
  1458. __xfrm_policy_unlink(pol, dir);
  1459. }
  1460. ret = pol;
  1461. break;
  1462. }
  1463. }
  1464. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1465. if (ret && delete)
  1466. xfrm_policy_kill(ret);
  1467. return ret;
  1468. }
  1469. EXPORT_SYMBOL(xfrm_policy_byid);
  1470. #ifdef CONFIG_SECURITY_NETWORK_XFRM
  1471. static inline int
  1472. xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
  1473. {
  1474. struct xfrm_policy *pol;
  1475. int err = 0;
  1476. list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
  1477. if (pol->walk.dead ||
  1478. xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
  1479. pol->type != type)
  1480. continue;
  1481. err = security_xfrm_policy_delete(pol->security);
  1482. if (err) {
  1483. xfrm_audit_policy_delete(pol, 0, task_valid);
  1484. return err;
  1485. }
  1486. }
  1487. return err;
  1488. }
  1489. #else
  1490. static inline int
  1491. xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
  1492. {
  1493. return 0;
  1494. }
  1495. #endif
  1496. int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
  1497. {
  1498. int dir, err = 0, cnt = 0;
  1499. struct xfrm_policy *pol;
  1500. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1501. err = xfrm_policy_flush_secctx_check(net, type, task_valid);
  1502. if (err)
  1503. goto out;
  1504. again:
  1505. list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
  1506. dir = xfrm_policy_id2dir(pol->index);
  1507. if (pol->walk.dead ||
  1508. dir >= XFRM_POLICY_MAX ||
  1509. pol->type != type)
  1510. continue;
  1511. __xfrm_policy_unlink(pol, dir);
  1512. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1513. cnt++;
  1514. xfrm_audit_policy_delete(pol, 1, task_valid);
  1515. xfrm_policy_kill(pol);
  1516. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1517. goto again;
  1518. }
  1519. if (cnt)
  1520. __xfrm_policy_inexact_flush(net);
  1521. else
  1522. err = -ESRCH;
  1523. out:
  1524. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1525. return err;
  1526. }
  1527. EXPORT_SYMBOL(xfrm_policy_flush);
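/* Usage sketch, illustrative only.  Note that the walk above drops the
 * policy lock around xfrm_policy_kill() and then restarts from the list
 * head ("goto again"), so the flush stays safe against concurrent list
 * changes at the cost of re-scanning.
 *
 *	err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true);
 *	if (err == -ESRCH)
 *		;	-- nothing to delete, not a hard error for most callers
 */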
  1528. int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
  1529. int (*func)(struct xfrm_policy *, int, int, void*),
  1530. void *data)
  1531. {
  1532. struct xfrm_policy *pol;
  1533. struct xfrm_policy_walk_entry *x;
  1534. int error = 0;
  1535. if (walk->type >= XFRM_POLICY_TYPE_MAX &&
  1536. walk->type != XFRM_POLICY_TYPE_ANY)
  1537. return -EINVAL;
  1538. if (list_empty(&walk->walk.all) && walk->seq != 0)
  1539. return 0;
  1540. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1541. if (list_empty(&walk->walk.all))
  1542. x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
  1543. else
  1544. x = list_first_entry(&walk->walk.all,
  1545. struct xfrm_policy_walk_entry, all);
  1546. list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
  1547. if (x->dead)
  1548. continue;
  1549. pol = container_of(x, struct xfrm_policy, walk);
  1550. if (walk->type != XFRM_POLICY_TYPE_ANY &&
  1551. walk->type != pol->type)
  1552. continue;
  1553. error = func(pol, xfrm_policy_id2dir(pol->index),
  1554. walk->seq, data);
  1555. if (error) {
  1556. list_move_tail(&walk->walk.all, &x->all);
  1557. goto out;
  1558. }
  1559. walk->seq++;
  1560. }
  1561. if (walk->seq == 0) {
  1562. error = -ENOENT;
  1563. goto out;
  1564. }
  1565. list_del_init(&walk->walk.all);
  1566. out:
  1567. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1568. return error;
  1569. }
  1570. EXPORT_SYMBOL(xfrm_policy_walk);
  1571. void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
  1572. {
  1573. INIT_LIST_HEAD(&walk->walk.all);
  1574. walk->walk.dead = 1;
  1575. walk->type = type;
  1576. walk->seq = 0;
  1577. }
  1578. EXPORT_SYMBOL(xfrm_policy_walk_init);
  1579. void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
  1580. {
  1581. if (list_empty(&walk->walk.all))
  1582. return;
  1583. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1584. list_del(&walk->walk.all);
  1585. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1586. }
  1587. EXPORT_SYMBOL(xfrm_policy_walk_done);
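/* Usage sketch, illustrative only.  The walker keeps its position on the
 * policy_all list between calls, so a dump can be resumed; dump_one() here
 * is a hypothetical callback.
 *
 *	static int dump_one(struct xfrm_policy *pol, int dir, int seq, void *data)
 *	{
 *		return 0;	-- non-zero stops the walk and saves the position
 *	}
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one, data);
 *	xfrm_policy_walk_done(&walk, net);
 */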
  1588. /*
  1589. * Find policy to apply to this flow.
  1590. *
  1591. * Returns 0 if policy found, else an -errno.
  1592. */
  1593. static int xfrm_policy_match(const struct xfrm_policy *pol,
  1594. const struct flowi *fl,
  1595. u8 type, u16 family, int dir, u32 if_id)
  1596. {
  1597. const struct xfrm_selector *sel = &pol->selector;
  1598. int ret = -ESRCH;
  1599. bool match;
  1600. if (pol->family != family ||
  1601. pol->if_id != if_id ||
  1602. (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
  1603. pol->type != type)
  1604. return ret;
  1605. match = xfrm_selector_match(sel, fl, family);
  1606. if (match)
  1607. ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
  1608. dir);
  1609. return ret;
  1610. }
  1611. static struct xfrm_pol_inexact_node *
  1612. xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
  1613. seqcount_spinlock_t *count,
  1614. const xfrm_address_t *addr, u16 family)
  1615. {
  1616. const struct rb_node *parent;
  1617. int seq;
  1618. again:
  1619. seq = read_seqcount_begin(count);
  1620. parent = rcu_dereference_raw(r->rb_node);
  1621. while (parent) {
  1622. struct xfrm_pol_inexact_node *node;
  1623. int delta;
  1624. node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
  1625. delta = xfrm_policy_addr_delta(addr, &node->addr,
  1626. node->prefixlen, family);
  1627. if (delta < 0) {
  1628. parent = rcu_dereference_raw(parent->rb_left);
  1629. continue;
  1630. } else if (delta > 0) {
  1631. parent = rcu_dereference_raw(parent->rb_right);
  1632. continue;
  1633. }
  1634. return node;
  1635. }
  1636. if (read_seqcount_retry(count, seq))
  1637. goto again;
  1638. return NULL;
  1639. }
  1640. static bool
  1641. xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
  1642. struct xfrm_pol_inexact_bin *b,
  1643. const xfrm_address_t *saddr,
  1644. const xfrm_address_t *daddr)
  1645. {
  1646. struct xfrm_pol_inexact_node *n;
  1647. u16 family;
  1648. if (!b)
  1649. return false;
  1650. family = b->k.family;
  1651. memset(cand, 0, sizeof(*cand));
  1652. cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
  1653. n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
  1654. family);
  1655. if (n) {
  1656. cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
  1657. n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
  1658. family);
  1659. if (n)
  1660. cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
  1661. }
  1662. n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
  1663. family);
  1664. if (n)
  1665. cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
  1666. return true;
  1667. }
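/* Summary added for clarity: a lookup in an inexact bin evaluates up to four
 * candidate chains, filled in above and consumed by
 * xfrm_policy_eval_candidates():
 *
 *	cand->res[XFRM_POL_CAND_ANY]	-- bin-wide list, no usable prefix
 *	cand->res[XFRM_POL_CAND_DADDR]	-- subtree keyed by daddr only
 *	cand->res[XFRM_POL_CAND_SADDR]	-- subtree keyed by saddr only
 *	cand->res[XFRM_POL_CAND_BOTH]	-- saddr subtree below the daddr node
 */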
  1668. static struct xfrm_pol_inexact_bin *
  1669. xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
  1670. u8 dir, u32 if_id)
  1671. {
  1672. struct xfrm_pol_inexact_key k = {
  1673. .family = family,
  1674. .type = type,
  1675. .dir = dir,
  1676. .if_id = if_id,
  1677. };
  1678. write_pnet(&k.net, net);
  1679. return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
  1680. xfrm_pol_inexact_params);
  1681. }
  1682. static struct xfrm_pol_inexact_bin *
  1683. xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
  1684. u8 dir, u32 if_id)
  1685. {
  1686. struct xfrm_pol_inexact_bin *bin;
  1687. lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
  1688. rcu_read_lock();
  1689. bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
  1690. rcu_read_unlock();
  1691. return bin;
  1692. }
  1693. static struct xfrm_policy *
  1694. __xfrm_policy_eval_candidates(struct hlist_head *chain,
  1695. struct xfrm_policy *prefer,
  1696. const struct flowi *fl,
  1697. u8 type, u16 family, int dir, u32 if_id)
  1698. {
  1699. u32 priority = prefer ? prefer->priority : ~0u;
  1700. struct xfrm_policy *pol;
  1701. if (!chain)
  1702. return NULL;
  1703. hlist_for_each_entry_rcu(pol, chain, bydst) {
  1704. int err;
  1705. if (pol->priority > priority)
  1706. break;
  1707. err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
  1708. if (err) {
  1709. if (err != -ESRCH)
  1710. return ERR_PTR(err);
  1711. continue;
  1712. }
  1713. if (prefer) {
  1714. /* matches. Is it older than *prefer? */
  1715. if (pol->priority == priority &&
  1716. prefer->pos < pol->pos)
  1717. return prefer;
  1718. }
  1719. return pol;
  1720. }
  1721. return NULL;
  1722. }
  1723. static struct xfrm_policy *
  1724. xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
  1725. struct xfrm_policy *prefer,
  1726. const struct flowi *fl,
  1727. u8 type, u16 family, int dir, u32 if_id)
  1728. {
  1729. struct xfrm_policy *tmp;
  1730. int i;
  1731. for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
  1732. tmp = __xfrm_policy_eval_candidates(cand->res[i],
  1733. prefer,
  1734. fl, type, family, dir,
  1735. if_id);
  1736. if (!tmp)
  1737. continue;
  1738. if (IS_ERR(tmp))
  1739. return tmp;
  1740. prefer = tmp;
  1741. }
  1742. return prefer;
  1743. }
  1744. static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
  1745. const struct flowi *fl,
  1746. u16 family, u8 dir,
  1747. u32 if_id)
  1748. {
  1749. struct xfrm_pol_inexact_candidates cand;
  1750. const xfrm_address_t *daddr, *saddr;
  1751. struct xfrm_pol_inexact_bin *bin;
  1752. struct xfrm_policy *pol, *ret;
  1753. struct hlist_head *chain;
  1754. unsigned int sequence;
  1755. int err;
  1756. daddr = xfrm_flowi_daddr(fl, family);
  1757. saddr = xfrm_flowi_saddr(fl, family);
  1758. if (unlikely(!daddr || !saddr))
  1759. return NULL;
  1760. rcu_read_lock();
  1761. retry:
  1762. do {
  1763. sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
  1764. chain = policy_hash_direct(net, daddr, saddr, family, dir);
  1765. } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
  1766. ret = NULL;
  1767. hlist_for_each_entry_rcu(pol, chain, bydst) {
  1768. err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
  1769. if (err) {
  1770. if (err == -ESRCH)
  1771. continue;
  1772. else {
  1773. ret = ERR_PTR(err);
  1774. goto fail;
  1775. }
  1776. } else {
  1777. ret = pol;
  1778. break;
  1779. }
  1780. }
  1781. bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
  1782. if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
  1783. daddr))
  1784. goto skip_inexact;
  1785. pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
  1786. family, dir, if_id);
  1787. if (pol) {
  1788. ret = pol;
  1789. if (IS_ERR(pol))
  1790. goto fail;
  1791. }
  1792. skip_inexact:
  1793. if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
  1794. goto retry;
  1795. if (ret && !xfrm_pol_hold_rcu(ret))
  1796. goto retry;
  1797. fail:
  1798. rcu_read_unlock();
  1799. return ret;
  1800. }
  1801. static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
  1802. const struct flowi *fl,
  1803. u16 family, u8 dir, u32 if_id)
  1804. {
  1805. #ifdef CONFIG_XFRM_SUB_POLICY
  1806. struct xfrm_policy *pol;
  1807. pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
  1808. dir, if_id);
  1809. if (pol != NULL)
  1810. return pol;
  1811. #endif
  1812. return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
  1813. dir, if_id);
  1814. }
  1815. static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
  1816. const struct flowi *fl,
  1817. u16 family, u32 if_id)
  1818. {
  1819. struct xfrm_policy *pol;
  1820. rcu_read_lock();
  1821. again:
  1822. pol = rcu_dereference(sk->sk_policy[dir]);
  1823. if (pol != NULL) {
  1824. bool match;
  1825. int err = 0;
  1826. if (pol->family != family) {
  1827. pol = NULL;
  1828. goto out;
  1829. }
  1830. match = xfrm_selector_match(&pol->selector, fl, family);
  1831. if (match) {
  1832. if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
  1833. pol->if_id != if_id) {
  1834. pol = NULL;
  1835. goto out;
  1836. }
  1837. err = security_xfrm_policy_lookup(pol->security,
  1838. fl->flowi_secid,
  1839. dir);
  1840. if (!err) {
  1841. if (!xfrm_pol_hold_rcu(pol))
  1842. goto again;
  1843. } else if (err == -ESRCH) {
  1844. pol = NULL;
  1845. } else {
  1846. pol = ERR_PTR(err);
  1847. }
  1848. } else
  1849. pol = NULL;
  1850. }
  1851. out:
  1852. rcu_read_unlock();
  1853. return pol;
  1854. }
  1855. static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
  1856. {
  1857. struct net *net = xp_net(pol);
  1858. list_add(&pol->walk.all, &net->xfrm.policy_all);
  1859. net->xfrm.policy_count[dir]++;
  1860. xfrm_pol_hold(pol);
  1861. }
  1862. static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
  1863. int dir)
  1864. {
  1865. struct net *net = xp_net(pol);
  1866. if (list_empty(&pol->walk.all))
  1867. return NULL;
  1868. /* Socket policies are not hashed. */
  1869. if (!hlist_unhashed(&pol->bydst)) {
  1870. hlist_del_rcu(&pol->bydst);
  1871. hlist_del_init(&pol->bydst_inexact_list);
  1872. hlist_del(&pol->byidx);
  1873. }
  1874. list_del_init(&pol->walk.all);
  1875. net->xfrm.policy_count[dir]--;
  1876. return pol;
  1877. }
  1878. static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
  1879. {
  1880. __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
  1881. }
  1882. static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
  1883. {
  1884. __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
  1885. }
  1886. int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
  1887. {
  1888. struct net *net = xp_net(pol);
  1889. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1890. pol = __xfrm_policy_unlink(pol, dir);
  1891. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1892. if (pol) {
  1893. xfrm_policy_kill(pol);
  1894. return 0;
  1895. }
  1896. return -ENOENT;
  1897. }
  1898. EXPORT_SYMBOL(xfrm_policy_delete);
  1899. int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  1900. {
  1901. struct net *net = sock_net(sk);
  1902. struct xfrm_policy *old_pol;
  1903. #ifdef CONFIG_XFRM_SUB_POLICY
  1904. if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
  1905. return -EINVAL;
  1906. #endif
  1907. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1908. old_pol = rcu_dereference_protected(sk->sk_policy[dir],
  1909. lockdep_is_held(&net->xfrm.xfrm_policy_lock));
  1910. if (pol) {
  1911. pol->curlft.add_time = ktime_get_real_seconds();
  1912. pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
  1913. xfrm_sk_policy_link(pol, dir);
  1914. }
  1915. rcu_assign_pointer(sk->sk_policy[dir], pol);
  1916. if (old_pol) {
  1917. if (pol)
  1918. xfrm_policy_requeue(old_pol, pol);
  1919. /* Unlinking always succeeds. This is the only function
  1920. * allowed to delete or replace socket policy.
  1921. */
  1922. xfrm_sk_policy_unlink(old_pol, dir);
  1923. }
  1924. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1925. if (old_pol) {
  1926. xfrm_policy_kill(old_pol);
  1927. }
  1928. return 0;
  1929. }
  1930. static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
  1931. {
  1932. struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
  1933. struct net *net = xp_net(old);
  1934. if (newp) {
  1935. newp->selector = old->selector;
  1936. if (security_xfrm_policy_clone(old->security,
  1937. &newp->security)) {
  1938. kfree(newp);
  1939. return NULL; /* ENOMEM */
  1940. }
  1941. newp->lft = old->lft;
  1942. newp->curlft = old->curlft;
  1943. newp->mark = old->mark;
  1944. newp->if_id = old->if_id;
  1945. newp->action = old->action;
  1946. newp->flags = old->flags;
  1947. newp->xfrm_nr = old->xfrm_nr;
  1948. newp->index = old->index;
  1949. newp->type = old->type;
  1950. newp->family = old->family;
  1951. memcpy(newp->xfrm_vec, old->xfrm_vec,
  1952. newp->xfrm_nr*sizeof(struct xfrm_tmpl));
  1953. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  1954. xfrm_sk_policy_link(newp, dir);
  1955. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  1956. xfrm_pol_put(newp);
  1957. }
  1958. return newp;
  1959. }
  1960. int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
  1961. {
  1962. const struct xfrm_policy *p;
  1963. struct xfrm_policy *np;
  1964. int i, ret = 0;
  1965. rcu_read_lock();
  1966. for (i = 0; i < 2; i++) {
  1967. p = rcu_dereference(osk->sk_policy[i]);
  1968. if (p) {
  1969. np = clone_policy(p, i);
  1970. if (unlikely(!np)) {
  1971. ret = -ENOMEM;
  1972. break;
  1973. }
  1974. rcu_assign_pointer(sk->sk_policy[i], np);
  1975. }
  1976. }
  1977. rcu_read_unlock();
  1978. return ret;
  1979. }
  1980. static int
  1981. xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
  1982. xfrm_address_t *remote, unsigned short family, u32 mark)
  1983. {
  1984. int err;
  1985. const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  1986. if (unlikely(afinfo == NULL))
  1987. return -EINVAL;
  1988. err = afinfo->get_saddr(net, oif, local, remote, mark);
  1989. rcu_read_unlock();
  1990. return err;
  1991. }
  1992. /* Resolve list of templates for the flow, given policy. */
  1993. static int
  1994. xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
  1995. struct xfrm_state **xfrm, unsigned short family)
  1996. {
  1997. struct net *net = xp_net(policy);
  1998. int nx;
  1999. int i, error;
  2000. xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
  2001. xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
  2002. xfrm_address_t tmp;
  2003. for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
  2004. struct xfrm_state *x;
  2005. xfrm_address_t *remote = daddr;
  2006. xfrm_address_t *local = saddr;
  2007. struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
  2008. if (tmpl->mode == XFRM_MODE_TUNNEL ||
  2009. tmpl->mode == XFRM_MODE_BEET) {
  2010. remote = &tmpl->id.daddr;
  2011. local = &tmpl->saddr;
  2012. if (xfrm_addr_any(local, tmpl->encap_family)) {
  2013. error = xfrm_get_saddr(net, fl->flowi_oif,
  2014. &tmp, remote,
  2015. tmpl->encap_family, 0);
  2016. if (error)
  2017. goto fail;
  2018. local = &tmp;
  2019. }
  2020. }
  2021. x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
  2022. family, policy->if_id);
  2023. if (x && x->km.state == XFRM_STATE_VALID) {
  2024. xfrm[nx++] = x;
  2025. daddr = remote;
  2026. saddr = local;
  2027. continue;
  2028. }
  2029. if (x) {
  2030. error = (x->km.state == XFRM_STATE_ERROR ?
  2031. -EINVAL : -EAGAIN);
  2032. xfrm_state_put(x);
  2033. } else if (error == -ESRCH) {
  2034. error = -EAGAIN;
  2035. }
  2036. if (!tmpl->optional)
  2037. goto fail;
  2038. }
  2039. return nx;
  2040. fail:
  2041. for (nx--; nx >= 0; nx--)
  2042. xfrm_state_put(xfrm[nx]);
  2043. return error;
  2044. }
  2045. static int
  2046. xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
  2047. struct xfrm_state **xfrm, unsigned short family)
  2048. {
  2049. struct xfrm_state *tp[XFRM_MAX_DEPTH];
  2050. struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
  2051. int cnx = 0;
  2052. int error;
  2053. int ret;
  2054. int i;
  2055. for (i = 0; i < npols; i++) {
  2056. if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
  2057. error = -ENOBUFS;
  2058. goto fail;
  2059. }
  2060. ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
  2061. if (ret < 0) {
  2062. error = ret;
  2063. goto fail;
  2064. } else
  2065. cnx += ret;
  2066. }
  2067. /* found states are sorted for outbound processing */
  2068. if (npols > 1)
  2069. xfrm_state_sort(xfrm, tpp, cnx, family);
  2070. return cnx;
  2071. fail:
  2072. for (cnx--; cnx >= 0; cnx--)
  2073. xfrm_state_put(tpp[cnx]);
  2074. return error;
  2075. }
  2076. static int xfrm_get_tos(const struct flowi *fl, int family)
  2077. {
  2078. if (family == AF_INET)
  2079. return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
  2080. return 0;
  2081. }
  2082. static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
  2083. {
  2084. const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  2085. struct dst_ops *dst_ops;
  2086. struct xfrm_dst *xdst;
  2087. if (!afinfo)
  2088. return ERR_PTR(-EINVAL);
  2089. switch (family) {
  2090. case AF_INET:
  2091. dst_ops = &net->xfrm.xfrm4_dst_ops;
  2092. break;
  2093. #if IS_ENABLED(CONFIG_IPV6)
  2094. case AF_INET6:
  2095. dst_ops = &net->xfrm.xfrm6_dst_ops;
  2096. break;
  2097. #endif
  2098. default:
  2099. BUG();
  2100. }
  2101. xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
  2102. if (likely(xdst)) {
  2103. struct dst_entry *dst = &xdst->u.dst;
  2104. memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
  2105. } else
  2106. xdst = ERR_PTR(-ENOBUFS);
  2107. rcu_read_unlock();
  2108. return xdst;
  2109. }
  2110. static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
  2111. int nfheader_len)
  2112. {
  2113. if (dst->ops->family == AF_INET6) {
  2114. struct rt6_info *rt = (struct rt6_info *)dst;
  2115. path->path_cookie = rt6_get_cookie(rt);
  2116. path->u.rt6.rt6i_nfheader_len = nfheader_len;
  2117. }
  2118. }
  2119. static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
  2120. const struct flowi *fl)
  2121. {
  2122. const struct xfrm_policy_afinfo *afinfo =
  2123. xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
  2124. int err;
  2125. if (!afinfo)
  2126. return -EINVAL;
  2127. err = afinfo->fill_dst(xdst, dev, fl);
  2128. rcu_read_unlock();
  2129. return err;
  2130. }
  2131. /* Allocate chain of dst_entry's, attach known xfrm's, calculate
  2132. * all the metrics... In short, bundle a bundle.
  2133. */
  2134. static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
  2135. struct xfrm_state **xfrm,
  2136. struct xfrm_dst **bundle,
  2137. int nx,
  2138. const struct flowi *fl,
  2139. struct dst_entry *dst)
  2140. {
  2141. const struct xfrm_state_afinfo *afinfo;
  2142. const struct xfrm_mode *inner_mode;
  2143. struct net *net = xp_net(policy);
  2144. unsigned long now = jiffies;
  2145. struct net_device *dev;
  2146. struct xfrm_dst *xdst_prev = NULL;
  2147. struct xfrm_dst *xdst0 = NULL;
  2148. int i = 0;
  2149. int err;
  2150. int header_len = 0;
  2151. int nfheader_len = 0;
  2152. int trailer_len = 0;
  2153. int tos;
  2154. int family = policy->selector.family;
  2155. xfrm_address_t saddr, daddr;
  2156. xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
  2157. tos = xfrm_get_tos(fl, family);
  2158. dst_hold(dst);
  2159. for (; i < nx; i++) {
  2160. struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
  2161. struct dst_entry *dst1 = &xdst->u.dst;
  2162. err = PTR_ERR(xdst);
  2163. if (IS_ERR(xdst)) {
  2164. dst_release(dst);
  2165. goto put_states;
  2166. }
  2167. bundle[i] = xdst;
  2168. if (!xdst_prev)
  2169. xdst0 = xdst;
  2170. else
  2171. /* Ref count is taken during xfrm_alloc_dst()
  2172. * No need to do dst_clone() on dst1
  2173. */
  2174. xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
  2175. if (xfrm[i]->sel.family == AF_UNSPEC) {
  2176. inner_mode = xfrm_ip2inner_mode(xfrm[i],
  2177. xfrm_af2proto(family));
  2178. if (!inner_mode) {
  2179. err = -EAFNOSUPPORT;
  2180. dst_release(dst);
  2181. goto put_states;
  2182. }
  2183. } else
  2184. inner_mode = &xfrm[i]->inner_mode;
  2185. xdst->route = dst;
  2186. dst_copy_metrics(dst1, dst);
  2187. if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
  2188. __u32 mark = 0;
  2189. if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
  2190. mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
  2191. family = xfrm[i]->props.family;
  2192. dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
  2193. &saddr, &daddr, family, mark);
  2194. err = PTR_ERR(dst);
  2195. if (IS_ERR(dst))
  2196. goto put_states;
  2197. } else
  2198. dst_hold(dst);
  2199. dst1->xfrm = xfrm[i];
  2200. xdst->xfrm_genid = xfrm[i]->genid;
  2201. dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
  2202. dst1->lastuse = now;
  2203. dst1->input = dst_discard;
  2204. rcu_read_lock();
  2205. afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
  2206. if (likely(afinfo))
  2207. dst1->output = afinfo->output;
  2208. else
  2209. dst1->output = dst_discard_out;
  2210. rcu_read_unlock();
  2211. xdst_prev = xdst;
  2212. header_len += xfrm[i]->props.header_len;
  2213. if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
  2214. nfheader_len += xfrm[i]->props.header_len;
  2215. trailer_len += xfrm[i]->props.trailer_len;
  2216. }
  2217. xfrm_dst_set_child(xdst_prev, dst);
  2218. xdst0->path = dst;
  2219. err = -ENODEV;
  2220. dev = dst->dev;
  2221. if (!dev)
  2222. goto free_dst;
  2223. xfrm_init_path(xdst0, dst, nfheader_len);
  2224. xfrm_init_pmtu(bundle, nx);
  2225. for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
  2226. xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
  2227. err = xfrm_fill_dst(xdst_prev, dev, fl);
  2228. if (err)
  2229. goto free_dst;
  2230. xdst_prev->u.dst.header_len = header_len;
  2231. xdst_prev->u.dst.trailer_len = trailer_len;
  2232. header_len -= xdst_prev->u.dst.xfrm->props.header_len;
  2233. trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
  2234. }
  2235. return &xdst0->u.dst;
  2236. put_states:
  2237. for (; i < nx; i++)
  2238. xfrm_state_put(xfrm[i]);
  2239. free_dst:
  2240. if (xdst0)
  2241. dst_release_immediate(&xdst0->u.dst);
  2242. return ERR_PTR(err);
  2243. }
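/* Illustrative summary of the result: xfrm_bundle_create() returns the head
 * of a chain of xfrm_dst entries, one per state, with the original route at
 * the tail.  For a single state the result looks like:
 *
 *	xdst0->u.dst.xfrm = xfrm[0]
 *	xdst0->route      = dst_orig	-- route that triggered the lookup
 *	xdst0 child/path  = dst_orig for transport mode, or the route
 *	                    towards the outer tunnel endpoint for tunnel mode
 *
 * header_len/trailer_len are accumulated over all states and then written
 * back outermost-first, so each level advertises the overhead of everything
 * inside it.
 */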
  2244. static int xfrm_expand_policies(const struct flowi *fl, u16 family,
  2245. struct xfrm_policy **pols,
  2246. int *num_pols, int *num_xfrms)
  2247. {
  2248. int i;
  2249. if (*num_pols == 0 || !pols[0]) {
  2250. *num_pols = 0;
  2251. *num_xfrms = 0;
  2252. return 0;
  2253. }
  2254. if (IS_ERR(pols[0]))
  2255. return PTR_ERR(pols[0]);
  2256. *num_xfrms = pols[0]->xfrm_nr;
  2257. #ifdef CONFIG_XFRM_SUB_POLICY
  2258. if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
  2259. pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
  2260. pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
  2261. XFRM_POLICY_TYPE_MAIN,
  2262. fl, family,
  2263. XFRM_POLICY_OUT,
  2264. pols[0]->if_id);
  2265. if (pols[1]) {
  2266. if (IS_ERR(pols[1])) {
  2267. xfrm_pols_put(pols, *num_pols);
  2268. return PTR_ERR(pols[1]);
  2269. }
  2270. (*num_pols)++;
  2271. (*num_xfrms) += pols[1]->xfrm_nr;
  2272. }
  2273. }
  2274. #endif
  2275. for (i = 0; i < *num_pols; i++) {
  2276. if (pols[i]->action != XFRM_POLICY_ALLOW) {
  2277. *num_xfrms = -1;
  2278. break;
  2279. }
  2280. }
  2281. return 0;
  2282. }
  2283. static struct xfrm_dst *
  2284. xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
  2285. const struct flowi *fl, u16 family,
  2286. struct dst_entry *dst_orig)
  2287. {
  2288. struct net *net = xp_net(pols[0]);
  2289. struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
  2290. struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
  2291. struct xfrm_dst *xdst;
  2292. struct dst_entry *dst;
  2293. int err;
  2294. /* Try to instantiate a bundle */
  2295. err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
  2296. if (err <= 0) {
  2297. if (err == 0)
  2298. return NULL;
  2299. if (err != -EAGAIN)
  2300. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  2301. return ERR_PTR(err);
  2302. }
  2303. dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
  2304. if (IS_ERR(dst)) {
  2305. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
  2306. return ERR_CAST(dst);
  2307. }
  2308. xdst = (struct xfrm_dst *)dst;
  2309. xdst->num_xfrms = err;
  2310. xdst->num_pols = num_pols;
  2311. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
  2312. xdst->policy_genid = atomic_read(&pols[0]->genid);
  2313. return xdst;
  2314. }
  2315. static void xfrm_policy_queue_process(struct timer_list *t)
  2316. {
  2317. struct sk_buff *skb;
  2318. struct sock *sk;
  2319. struct dst_entry *dst;
  2320. struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
  2321. struct net *net = xp_net(pol);
  2322. struct xfrm_policy_queue *pq = &pol->polq;
  2323. struct flowi fl;
  2324. struct sk_buff_head list;
  2325. __u32 skb_mark;
  2326. spin_lock(&pq->hold_queue.lock);
  2327. skb = skb_peek(&pq->hold_queue);
  2328. if (!skb) {
  2329. spin_unlock(&pq->hold_queue.lock);
  2330. goto out;
  2331. }
  2332. dst = skb_dst(skb);
  2333. sk = skb->sk;
  2334. /* Fixup the mark to support VTI. */
  2335. skb_mark = skb->mark;
  2336. skb->mark = pol->mark.v;
  2337. xfrm_decode_session(skb, &fl, dst->ops->family);
  2338. skb->mark = skb_mark;
  2339. spin_unlock(&pq->hold_queue.lock);
  2340. dst_hold(xfrm_dst_path(dst));
  2341. dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
  2342. if (IS_ERR(dst))
  2343. goto purge_queue;
  2344. if (dst->flags & DST_XFRM_QUEUE) {
  2345. dst_release(dst);
  2346. if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
  2347. goto purge_queue;
  2348. pq->timeout = pq->timeout << 1;
  2349. if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
  2350. xfrm_pol_hold(pol);
  2351. goto out;
  2352. }
  2353. dst_release(dst);
  2354. __skb_queue_head_init(&list);
  2355. spin_lock(&pq->hold_queue.lock);
  2356. pq->timeout = 0;
  2357. skb_queue_splice_init(&pq->hold_queue, &list);
  2358. spin_unlock(&pq->hold_queue.lock);
  2359. while (!skb_queue_empty(&list)) {
  2360. skb = __skb_dequeue(&list);
  2361. /* Fixup the mark to support VTI. */
  2362. skb_mark = skb->mark;
  2363. skb->mark = pol->mark.v;
  2364. xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
  2365. skb->mark = skb_mark;
  2366. dst_hold(xfrm_dst_path(skb_dst(skb)));
  2367. dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
  2368. if (IS_ERR(dst)) {
  2369. kfree_skb(skb);
  2370. continue;
  2371. }
  2372. nf_reset_ct(skb);
  2373. skb_dst_drop(skb);
  2374. skb_dst_set(skb, dst);
  2375. dst_output(net, skb->sk, skb);
  2376. }
  2377. out:
  2378. xfrm_pol_put(pol);
  2379. return;
  2380. purge_queue:
  2381. pq->timeout = 0;
  2382. skb_queue_purge(&pq->hold_queue);
  2383. xfrm_pol_put(pol);
  2384. }
  2385. static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  2386. {
  2387. unsigned long sched_next;
  2388. struct dst_entry *dst = skb_dst(skb);
  2389. struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
  2390. struct xfrm_policy *pol = xdst->pols[0];
  2391. struct xfrm_policy_queue *pq = &pol->polq;
  2392. if (unlikely(skb_fclone_busy(sk, skb))) {
  2393. kfree_skb(skb);
  2394. return 0;
  2395. }
  2396. if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
  2397. kfree_skb(skb);
  2398. return -EAGAIN;
  2399. }
  2400. skb_dst_force(skb);
  2401. spin_lock_bh(&pq->hold_queue.lock);
  2402. if (!pq->timeout)
  2403. pq->timeout = XFRM_QUEUE_TMO_MIN;
  2404. sched_next = jiffies + pq->timeout;
  2405. if (del_timer(&pq->hold_timer)) {
  2406. if (time_before(pq->hold_timer.expires, sched_next))
  2407. sched_next = pq->hold_timer.expires;
  2408. xfrm_pol_put(pol);
  2409. }
  2410. __skb_queue_tail(&pq->hold_queue, skb);
  2411. if (!mod_timer(&pq->hold_timer, sched_next))
  2412. xfrm_pol_hold(pol);
  2413. spin_unlock_bh(&pq->hold_queue.lock);
  2414. return 0;
  2415. }
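/* Behaviour summary added for clarity: packets hitting a dummy bundle are
 * parked on pol->polq.hold_queue; the first one arms the hold timer with
 * XFRM_QUEUE_TMO_MIN.  Each time xfrm_policy_queue_process() still finds
 * only a DST_XFRM_QUEUE route the timeout doubles (TMO_MIN, 2*TMO_MIN,
 * 4*TMO_MIN, ...); once it reaches XFRM_QUEUE_TMO_MAX the queue is purged.
 * As soon as a real bundle exists, the queued skbs are re-looked-up and
 * sent via dst_output().
 */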
  2416. static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
  2417. struct xfrm_flo *xflo,
  2418. const struct flowi *fl,
  2419. int num_xfrms,
  2420. u16 family)
  2421. {
  2422. int err;
  2423. struct net_device *dev;
  2424. struct dst_entry *dst;
  2425. struct dst_entry *dst1;
  2426. struct xfrm_dst *xdst;
  2427. xdst = xfrm_alloc_dst(net, family);
  2428. if (IS_ERR(xdst))
  2429. return xdst;
  2430. if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
  2431. net->xfrm.sysctl_larval_drop ||
  2432. num_xfrms <= 0)
  2433. return xdst;
  2434. dst = xflo->dst_orig;
  2435. dst1 = &xdst->u.dst;
  2436. dst_hold(dst);
  2437. xdst->route = dst;
  2438. dst_copy_metrics(dst1, dst);
  2439. dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
  2440. dst1->flags |= DST_XFRM_QUEUE;
  2441. dst1->lastuse = jiffies;
  2442. dst1->input = dst_discard;
  2443. dst1->output = xdst_queue_output;
  2444. dst_hold(dst);
  2445. xfrm_dst_set_child(xdst, dst);
  2446. xdst->path = dst;
  2447. xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
  2448. err = -ENODEV;
  2449. dev = dst->dev;
  2450. if (!dev)
  2451. goto free_dst;
  2452. err = xfrm_fill_dst(xdst, dev, fl);
  2453. if (err)
  2454. goto free_dst;
  2455. out:
  2456. return xdst;
  2457. free_dst:
  2458. dst_release(dst1);
  2459. xdst = ERR_PTR(err);
  2460. goto out;
  2461. }
  2462. static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
  2463. const struct flowi *fl,
  2464. u16 family, u8 dir,
  2465. struct xfrm_flo *xflo, u32 if_id)
  2466. {
  2467. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  2468. int num_pols = 0, num_xfrms = 0, err;
  2469. struct xfrm_dst *xdst;
  2470. /* Resolve policies to use if we couldn't get them from
  2471. * previous cache entry */
  2472. num_pols = 1;
  2473. pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
  2474. err = xfrm_expand_policies(fl, family, pols,
  2475. &num_pols, &num_xfrms);
  2476. if (err < 0)
  2477. goto inc_error;
  2478. if (num_pols == 0)
  2479. return NULL;
  2480. if (num_xfrms <= 0)
  2481. goto make_dummy_bundle;
  2482. xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
  2483. xflo->dst_orig);
  2484. if (IS_ERR(xdst)) {
  2485. err = PTR_ERR(xdst);
  2486. if (err == -EREMOTE) {
  2487. xfrm_pols_put(pols, num_pols);
  2488. return NULL;
  2489. }
  2490. if (err != -EAGAIN)
  2491. goto error;
  2492. goto make_dummy_bundle;
  2493. } else if (xdst == NULL) {
  2494. num_xfrms = 0;
  2495. goto make_dummy_bundle;
  2496. }
  2497. return xdst;
  2498. make_dummy_bundle:
  2499. /* We found policies, but there are no bundles to instantiate:
  2500. * either because the policy blocks, has no transformations, or
  2501. * we could not build a template (no xfrm_states). */
  2502. xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
  2503. if (IS_ERR(xdst)) {
  2504. xfrm_pols_put(pols, num_pols);
  2505. return ERR_CAST(xdst);
  2506. }
  2507. xdst->num_pols = num_pols;
  2508. xdst->num_xfrms = num_xfrms;
  2509. memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
  2510. return xdst;
  2511. inc_error:
  2512. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
  2513. error:
  2514. xfrm_pols_put(pols, num_pols);
  2515. return ERR_PTR(err);
  2516. }
  2517. static struct dst_entry *make_blackhole(struct net *net, u16 family,
  2518. struct dst_entry *dst_orig)
  2519. {
  2520. const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
  2521. struct dst_entry *ret;
  2522. if (!afinfo) {
  2523. dst_release(dst_orig);
  2524. return ERR_PTR(-EINVAL);
  2525. } else {
  2526. ret = afinfo->blackhole_route(net, dst_orig);
  2527. }
  2528. rcu_read_unlock();
  2529. return ret;
  2530. }
  2531. /* Finds/creates a bundle for given flow and if_id
  2532. *
  2533. * At the moment we eat a raw IP route. Mostly to speed up lookups
  2534. * on interfaces with disabled IPsec.
  2535. *
  2536. * xfrm_lookup uses an if_id of 0 by default, and is provided for
  2537. * compatibility.
  2538. */
  2539. struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
  2540. struct dst_entry *dst_orig,
  2541. const struct flowi *fl,
  2542. const struct sock *sk,
  2543. int flags, u32 if_id)
  2544. {
  2545. struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
  2546. struct xfrm_dst *xdst;
  2547. struct dst_entry *dst, *route;
  2548. u16 family = dst_orig->ops->family;
  2549. u8 dir = XFRM_POLICY_OUT;
  2550. int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
  2551. dst = NULL;
  2552. xdst = NULL;
  2553. route = NULL;
  2554. sk = sk_const_to_full_sk(sk);
  2555. if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
  2556. num_pols = 1;
  2557. pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
  2558. if_id);
  2559. err = xfrm_expand_policies(fl, family, pols,
  2560. &num_pols, &num_xfrms);
  2561. if (err < 0)
  2562. goto dropdst;
  2563. if (num_pols) {
  2564. if (num_xfrms <= 0) {
  2565. drop_pols = num_pols;
  2566. goto no_transform;
  2567. }
  2568. xdst = xfrm_resolve_and_create_bundle(
  2569. pols, num_pols, fl,
  2570. family, dst_orig);
  2571. if (IS_ERR(xdst)) {
  2572. xfrm_pols_put(pols, num_pols);
  2573. err = PTR_ERR(xdst);
  2574. if (err == -EREMOTE)
  2575. goto nopol;
  2576. goto dropdst;
  2577. } else if (xdst == NULL) {
  2578. num_xfrms = 0;
  2579. drop_pols = num_pols;
  2580. goto no_transform;
  2581. }
  2582. route = xdst->route;
  2583. }
  2584. }
  2585. if (xdst == NULL) {
  2586. struct xfrm_flo xflo;
  2587. xflo.dst_orig = dst_orig;
  2588. xflo.flags = flags;
  2589. /* To accelerate a bit... */
  2590. if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
  2591. !net->xfrm.policy_count[XFRM_POLICY_OUT]))
  2592. goto nopol;
  2593. xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
  2594. if (xdst == NULL)
  2595. goto nopol;
  2596. if (IS_ERR(xdst)) {
  2597. err = PTR_ERR(xdst);
  2598. goto dropdst;
  2599. }
  2600. num_pols = xdst->num_pols;
  2601. num_xfrms = xdst->num_xfrms;
  2602. memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
  2603. route = xdst->route;
  2604. }
  2605. dst = &xdst->u.dst;
  2606. if (route == NULL && num_xfrms > 0) {
  2607. /* The only case in which xfrm_bundle_lookup() returns a
  2608. * bundle with a null route is when the template could
  2609. * not be resolved. It means policies are there, but the
  2610. * bundle could not be created, since we don't yet
  2611. * have the xfrm_states. We need to wait for the KM to
  2612. * negotiate new SAs or bail out with an error. */
  2613. if (net->xfrm.sysctl_larval_drop) {
  2614. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
  2615. err = -EREMOTE;
  2616. goto error;
  2617. }
  2618. err = -EAGAIN;
  2619. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
  2620. goto error;
  2621. }
  2622. no_transform:
  2623. if (num_pols == 0)
  2624. goto nopol;
  2625. if ((flags & XFRM_LOOKUP_ICMP) &&
  2626. !(pols[0]->flags & XFRM_POLICY_ICMP)) {
  2627. err = -ENOENT;
  2628. goto error;
  2629. }
  2630. for (i = 0; i < num_pols; i++)
  2631. pols[i]->curlft.use_time = ktime_get_real_seconds();
  2632. if (num_xfrms < 0) {
  2633. /* Prohibit the flow */
  2634. XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
  2635. err = -EPERM;
  2636. goto error;
  2637. } else if (num_xfrms > 0) {
  2638. /* Flow transformed */
  2639. dst_release(dst_orig);
  2640. } else {
  2641. /* Flow passes untransformed */
  2642. dst_release(dst);
  2643. dst = dst_orig;
  2644. }
  2645. ok:
  2646. xfrm_pols_put(pols, drop_pols);
  2647. if (dst && dst->xfrm &&
  2648. dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
  2649. dst->flags |= DST_XFRM_TUNNEL;
  2650. return dst;
  2651. nopol:
  2652. if (!(flags & XFRM_LOOKUP_ICMP)) {
  2653. dst = dst_orig;
  2654. goto ok;
  2655. }
  2656. err = -ENOENT;
  2657. error:
  2658. dst_release(dst);
  2659. dropdst:
  2660. if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
  2661. dst_release(dst_orig);
  2662. xfrm_pols_put(pols, drop_pols);
  2663. return ERR_PTR(err);
  2664. }
  2665. EXPORT_SYMBOL(xfrm_lookup_with_ifid);
  2666. /* Main function: finds/creates a bundle for given flow.
  2667. *
  2668. * At the moment we eat a raw IP route. Mostly to speed up lookups
  2669. * on interfaces with disabled IPsec.
  2670. */
  2671. struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
  2672. const struct flowi *fl, const struct sock *sk,
  2673. int flags)
  2674. {
  2675. return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
  2676. }
  2677. EXPORT_SYMBOL(xfrm_lookup);
  2678. /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
  2679. * Otherwise we may send out blackholed packets.
  2680. */
  2681. struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
  2682. const struct flowi *fl,
  2683. const struct sock *sk, int flags)
  2684. {
  2685. struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
  2686. flags | XFRM_LOOKUP_QUEUE |
  2687. XFRM_LOOKUP_KEEP_DST_REF);
  2688. if (PTR_ERR(dst) == -EREMOTE)
  2689. return make_blackhole(net, dst_orig->ops->family, dst_orig);
  2690. if (IS_ERR(dst))
  2691. dst_release(dst_orig);
  2692. return dst;
  2693. }
  2694. EXPORT_SYMBOL(xfrm_lookup_route);
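/* Usage sketch, illustrative only; loosely modeled on the IPv4 output path.
 * On error the original route has already been released by
 * xfrm_lookup_route(), and blackhole routes still rely on the caller
 * invoking dst_output() as noted above.
 *
 *	struct dst_entry *dst;
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 */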
  2695. static inline int
  2696. xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
  2697. {
  2698. struct sec_path *sp = skb_sec_path(skb);
  2699. struct xfrm_state *x;
  2700. if (!sp || idx < 0 || idx >= sp->len)
  2701. return 0;
  2702. x = sp->xvec[idx];
  2703. if (!x->type->reject)
  2704. return 0;
  2705. return x->type->reject(x, skb, fl);
  2706. }
  2707. /* When skb is transformed back to its "native" form, we have to
  2708. * check policy restrictions. At the moment we do this in a maximally
  2709. * naive way. Shame on me. :-) Of course, connected sockets must
  2710. * have the policy cached at them.
  2711. */
  2712. static inline int
  2713. xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
  2714. unsigned short family)
  2715. {
  2716. if (xfrm_state_kern(x))
  2717. return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
  2718. return x->id.proto == tmpl->id.proto &&
  2719. (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
  2720. (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
  2721. x->props.mode == tmpl->mode &&
  2722. (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
  2723. !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
  2724. !(x->props.mode != XFRM_MODE_TRANSPORT &&
  2725. xfrm_state_addr_cmp(tmpl, x, family));
  2726. }
  2727. /*
  2728. * 0 or more is returned when validation succeeds (either a bypass
  2729. * because of an optional transport mode, or the next index of the matched
  2730. * secpath state for the template).
  2731. * -1 is returned when no matching template is found.
  2732. * Otherwise "-2 - errored_index" is returned.
  2733. */
  2734. static inline int
  2735. xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
  2736. unsigned short family)
  2737. {
  2738. int idx = start;
  2739. if (tmpl->optional) {
  2740. if (tmpl->mode == XFRM_MODE_TRANSPORT)
  2741. return start;
  2742. } else
  2743. start = -1;
  2744. for (; idx < sp->len; idx++) {
  2745. if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
  2746. return ++idx;
  2747. if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
  2748. if (start == -1)
  2749. start = -2-idx;
  2750. break;
  2751. }
  2752. }
  2753. return start;
  2754. }
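/* Worked example of the convention above, for illustration, with a secpath
 * sp and a template tmpl:
 *
 *	xfrm_policy_ok(tmpl, sp, 0, family) == 1
 *		-- tmpl matched sp->xvec[0]; check the next template from index 1
 *	xfrm_policy_ok(tmpl, sp, 0, family) == 0
 *		-- optional transport-mode template, bypassed
 *	xfrm_policy_ok(tmpl, sp, 0, family) == -1
 *		-- nothing matched a mandatory template
 *	xfrm_policy_ok(tmpl, sp, 0, family) == -2 - idx
 *		-- sp->xvec[idx] is a non-transport state that blocks the match
 */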

static void
decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl;
	u8 *xprth = skb_network_header(skb) + ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;

	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + ihl * 4;
				ports = (__be16 *)xprth;

				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;
		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;
		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ehdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;
		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ah_hdr = (__be32 *)xprth;

				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;
		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;
		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;
		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
}

#if IS_ENABLED(CONFIG_IPV6)
static void
decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
{
	struct flowi6 *fl6 = &fl->u.ip6;
	int onlyproto = 0;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	u32 offset = sizeof(*hdr);
	struct ipv6_opt_hdr *exthdr;
	const unsigned char *nh = skb_network_header(skb);
	u16 nhoff = IP6CB(skb)->nhoff;
	int oif = 0;
	u8 nexthdr;

	if (!nhoff)
		nhoff = offsetof(struct ipv6hdr, nexthdr);

	nexthdr = nh[nhoff];

	if (skb_dst(skb) && skb_dst(skb)->dev)
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl6, 0, sizeof(struct flowi6));
	fl6->flowi6_mark = skb->mark;
	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;

	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;

	while (nh + offset + sizeof(*exthdr) < skb->data ||
	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
		nh = skb_network_header(skb);
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);

		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			onlyproto = 1;
			fallthrough;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			offset += ipv6_optlen(exthdr);
			nexthdr = exthdr->nexthdr;
			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
			break;
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			if (!onlyproto && (nh + offset + 4 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
				__be16 *ports;

				nh = skb_network_header(skb);
				ports = (__be16 *)(nh + offset);
				fl6->fl6_sport = ports[!!reverse];
				fl6->fl6_dport = ports[!reverse];
			}
			fl6->flowi6_proto = nexthdr;
			return;
		case IPPROTO_ICMPV6:
			if (!onlyproto && (nh + offset + 2 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
				u8 *icmp;

				nh = skb_network_header(skb);
				icmp = (u8 *)(nh + offset);
				fl6->fl6_icmp_type = icmp[0];
				fl6->fl6_icmp_code = icmp[1];
			}
			fl6->flowi6_proto = nexthdr;
			return;
		case IPPROTO_GRE:
			if (!onlyproto &&
			    (nh + offset + 12 < skb->data ||
			     pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
				struct gre_base_hdr *gre_hdr;
				__be32 *gre_key;

				nh = skb_network_header(skb);
				gre_hdr = (struct gre_base_hdr *)(nh + offset);
				gre_key = (__be32 *)(gre_hdr + 1);

				if (gre_hdr->flags & GRE_KEY) {
					if (gre_hdr->flags & GRE_CSUM)
						gre_key++;
					fl6->fl6_gre_key = *gre_key;
				}
			}
			fl6->flowi6_proto = nexthdr;
			return;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
			offset += ipv6_optlen(exthdr);
			if (!onlyproto && (nh + offset + 3 < skb->data ||
			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
				struct ip6_mh *mh;

				nh = skb_network_header(skb);
				mh = (struct ip6_mh *)(nh + offset);
				fl6->fl6_mh_type = mh->ip6mh_type;
			}
			fl6->flowi6_proto = nexthdr;
			return;
#endif
		/* XXX Why are there these headers? */
		case IPPROTO_AH:
		case IPPROTO_ESP:
		case IPPROTO_COMP:
		default:
			fl6->fl6_ipsec_spi = 0;
			fl6->flowi6_proto = nexthdr;
			return;
		}
	}
}
#endif

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	switch (family) {
	case AF_INET:
		decode_session4(skb, fl, reverse);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		decode_session6(skb, fl, reverse);
		break;
#endif
	default:
		return -EAFNOSUPPORT;
	}

	return security_xfrm_decode_session(skb, &fl->flowi_secid);
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	int xerr_idx = -1;
	const struct xfrm_if_cb *ifcb;
	struct sec_path *sp;
	struct xfrm_if *xi;
	u32 if_id = 0;

	rcu_read_lock();
	ifcb = xfrm_if_get_cb();

	if (ifcb) {
		xi = ifcb->decode_session(skb, family);
		if (xi) {
			if_id = xi->p.if_id;
			net = xi->net;
		}
	}
	rcu_read_unlock();

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	sp = skb_sec_path(skb);
	if (sp) {
		int i;

		for (i = sp->len - 1; i >= 0; i--) {
			struct xfrm_state *x = sp->xvec[i];

			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	sk = sk_to_full_sk(sk);
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol)
		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = ktime_get_real_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN, if_id);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = ktime_get_real_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		sp = skb_sec_path(skb);
		if (!sp)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;

		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 * (A worked example of this matching follows the function.)
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
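
/*
 * Worked example (illustrative, not from the original source; assumes the
 * usual innermost-first template ordering): a policy carrying templates
 * T0 = ESP transport (inner) and T1 = ESP tunnel (outer), checked against
 * a secpath xvec[] = { tunnel ESP SA, transport ESP SA } built during
 * input processing.  The loop above starts at the last template:
 * xfrm_policy_ok(T1, sp, 0) matches xvec[0] and returns 1, so k becomes 1;
 * xfrm_policy_ok(T0, sp, 1) matches xvec[1] and returns 2.  With k == 2,
 * secpath_has_nontransport() finds no leftover non-transport state and the
 * packet is accepted.  A non-matching tunnel-mode state instead yields a
 * "-2 - index" value, which is turned back into xerr_idx for
 * xfrm_secpath_reject() on the reject path.
 */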

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);
	if (!skb_dst(skb)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use. We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them. It
	 * is just too much work. Instead we make the checks here on
	 * every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
	 * be marked on it.
	 * This will force stale_bundle() to fail on any xdst bundle with
	 * this dst linked in it.
	 * (A short sketch of this check-on-use call path follows this
	 * function.)
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}
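
/*
 * Illustrative sketch, not part of the original file: the check-on-use
 * pattern described above as seen from a caller.  dst_check() dispatches
 * to dst->ops->check() -- i.e. to xfrm_dst_check() for XFRM dsts --
 * whenever dst->obsolete is non-zero, which DST_OBSOLETE_FORCE_CHK
 * guarantees for every bundle.  The helper name is hypothetical.
 */
static inline struct dst_entry *example_bundle_revalidate(struct dst_entry *dst,
							  u32 cookie)
{
	/* Returns the dst if the bundle is still usable, NULL otherwise. */
	return dst_check(dst, cookie);
}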

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 * (An illustrative sketch of the per-layer MTU rule used here and in
 * xfrm_init_pmtu() follows the function.)
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}
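
/*
 * Illustrative sketch, not part of the original file: the per-layer MTU
 * rule applied by xfrm_init_pmtu() and by the refresh loop at the end of
 * xfrm_bundle_ok().  The helper name is hypothetical: a layer's usable
 * MTU is the child MTU reduced by the state's encapsulation overhead,
 * capped by the MTU of the route carrying the encapsulated packets.
 */
static inline u32 example_xfrm_layer_mtu(struct xfrm_state *x, u32 child_mtu,
					 u32 route_mtu)
{
	u32 pmtu = xfrm_state_mtu(x, child_mtu);

	return min(pmtu, route_mtu);
}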

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}

static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}

int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif

	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
  3538. #ifdef CONFIG_AUDITSYSCALL
  3539. static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
  3540. struct audit_buffer *audit_buf)
  3541. {
  3542. struct xfrm_sec_ctx *ctx = xp->security;
  3543. struct xfrm_selector *sel = &xp->selector;
  3544. if (ctx)
  3545. audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
  3546. ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
  3547. switch (sel->family) {
  3548. case AF_INET:
  3549. audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
  3550. if (sel->prefixlen_s != 32)
  3551. audit_log_format(audit_buf, " src_prefixlen=%d",
  3552. sel->prefixlen_s);
  3553. audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
  3554. if (sel->prefixlen_d != 32)
  3555. audit_log_format(audit_buf, " dst_prefixlen=%d",
  3556. sel->prefixlen_d);
  3557. break;
  3558. case AF_INET6:
  3559. audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
  3560. if (sel->prefixlen_s != 128)
  3561. audit_log_format(audit_buf, " src_prefixlen=%d",
  3562. sel->prefixlen_s);
  3563. audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
  3564. if (sel->prefixlen_d != 128)
  3565. audit_log_format(audit_buf, " dst_prefixlen=%d",
  3566. sel->prefixlen_d);
  3567. break;
  3568. }
  3569. }
  3570. void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
  3571. {
  3572. struct audit_buffer *audit_buf;
  3573. audit_buf = xfrm_audit_start("SPD-add");
  3574. if (audit_buf == NULL)
  3575. return;
  3576. xfrm_audit_helper_usrinfo(task_valid, audit_buf);
  3577. audit_log_format(audit_buf, " res=%u", result);
  3578. xfrm_audit_common_policyinfo(xp, audit_buf);
  3579. audit_log_end(audit_buf);
  3580. }
  3581. EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
  3582. void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
  3583. bool task_valid)
  3584. {
  3585. struct audit_buffer *audit_buf;
  3586. audit_buf = xfrm_audit_start("SPD-delete");
  3587. if (audit_buf == NULL)
  3588. return;
  3589. xfrm_audit_helper_usrinfo(task_valid, audit_buf);
  3590. audit_log_format(audit_buf, " res=%u", result);
  3591. xfrm_audit_common_policyinfo(xp, audit_buf);
  3592. audit_log_end(audit_buf);
  3593. }
  3594. EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
  3595. #endif
  3596. #ifdef CONFIG_XFRM_MIGRATE
  3597. static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
  3598. const struct xfrm_selector *sel_tgt)
  3599. {
  3600. if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
  3601. if (sel_tgt->family == sel_cmp->family &&
  3602. xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
  3603. sel_cmp->family) &&
  3604. xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
  3605. sel_cmp->family) &&
  3606. sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
  3607. sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
  3608. return true;
  3609. }
  3610. } else {
  3611. if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
  3612. return true;
  3613. }
  3614. }
  3615. return false;
  3616. }
  3617. static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
  3618. u8 dir, u8 type, struct net *net, u32 if_id)
  3619. {
  3620. struct xfrm_policy *pol, *ret = NULL;
  3621. struct hlist_head *chain;
  3622. u32 priority = ~0U;
  3623. spin_lock_bh(&net->xfrm.xfrm_policy_lock);
  3624. chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
  3625. hlist_for_each_entry(pol, chain, bydst) {
  3626. if ((if_id == 0 || pol->if_id == if_id) &&
  3627. xfrm_migrate_selector_match(sel, &pol->selector) &&
  3628. pol->type == type) {
  3629. ret = pol;
  3630. priority = ret->priority;
  3631. break;
  3632. }
  3633. }
  3634. chain = &net->xfrm.policy_inexact[dir];
  3635. hlist_for_each_entry(pol, chain, bydst_inexact_list) {
  3636. if ((pol->priority >= priority) && ret)
  3637. break;
  3638. if ((if_id == 0 || pol->if_id == if_id) &&
  3639. xfrm_migrate_selector_match(sel, &pol->selector) &&
  3640. pol->type == type) {
  3641. ret = pol;
  3642. break;
  3643. }
  3644. }
  3645. xfrm_pol_hold(ret);
  3646. spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
  3647. return ret;
  3648. }

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template does not store any
			 * IP addresses, so we only compare mode and protocol.
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks (an illustrative migrate entry sketch
	 * follows this function)
	 */
	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
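
/*
 * Illustrative sketch, not part of the original file: what a minimal
 * single-SA migration entry handed to xfrm_migrate() above might look
 * like.  The helper name, addresses and reqid are placeholders; only the
 * field names of struct xfrm_migrate used elsewhere in this file are
 * relied upon.
 */
static inline void example_fill_migrate(struct xfrm_migrate *mp)
{
	memset(mp, 0, sizeof(*mp));
	mp->old_daddr.a4 = htonl(0xc0a80101);	/* 192.168.1.1, placeholder */
	mp->old_saddr.a4 = htonl(0xc0a80102);	/* 192.168.1.2, placeholder */
	mp->new_daddr.a4 = htonl(0xc0a80201);	/* 192.168.2.1, placeholder */
	mp->new_saddr.a4 = htonl(0xc0a80202);	/* 192.168.2.2, placeholder */
	mp->proto = IPPROTO_ESP;
	mp->mode = XFRM_MODE_TUNNEL;
	mp->reqid = 0;				/* 0 matches any reqid */
	mp->old_family = AF_INET;
	mp->new_family = AF_INET;
}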

#endif