cma.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR] = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
	[RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
	[RDMA_CM_EVENT_REJECTED] = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED] = "established",
	[RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;
	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
	       cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);
	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);
	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *   request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;
	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;
	WARN_ON_ONCE(1);
	return false;
}

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;
	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);

/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;
	return NULL;
}
EXPORT_SYMBOL(rdma_iw_cm_id);

/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{
	struct rdma_id_private *id_priv =
		container_of(res, struct rdma_id_private, res);
	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_res_to_id);

static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name = "cma",
	.add = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
	struct xarray tcp_ps;
	struct xarray udp_ps;
	struct xarray ipoib_ps;
	struct xarray ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);
	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}

struct cma_device {
	struct list_head list;
	struct ib_device *device;
	struct completion comp;
	refcount_t refcount;
	struct list_head id_list;
	enum ib_gid_type *default_gid_type;
	u8 *default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_ucm_port_space ps;
	struct hlist_head owners;
	unsigned short port;
};

struct class_port_info_context {
	struct ib_class_port_info *class_port_info;
	struct ib_device *device;
	struct completion done;
	struct ib_sa_query *sa_query;
	u8 port_num;
};
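
/*
 * Port-space helpers: each port space (TCP/UDP/IPoIB/IB) tracks its bound
 * ports in a per-net-namespace xarray, indexed by port number.
 */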
static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);
	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);
	return xa_load(xa, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);
	xa_erase(xa, snum);
}

enum {
	CMA_OPTION_AFONLY,
};

void cma_dev_get(struct cma_device *cma_dev)
{
	refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}
	if (found_cma_dev)
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	if (default_gid_type == IB_GID_TYPE_IB &&
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
		default_gid_type = IB_GID_TYPE_ROCE;
	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;
	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;
	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;
	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		default_roce_tos;
	return 0;
}

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in
 * progress, and reporting it after the callback completes.
 */
struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *sa_mc;
		struct {
			struct work_struct work;
			struct rdma_cm_event event;
		} iboe_join;
	};
	struct list_head list;
	void *context;
	struct sockaddr_storage addr;
	u8 join_state;
};

struct cma_work {
	struct work_struct work;
	struct rdma_id_private *id;
	enum rdma_cm_state old_state;
	enum rdma_cm_state new_state;
	struct rdma_cm_event event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};
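
/*
 * cma_hdr is carried in the private data of IP-based connection requests so
 * the passive side can recover the original source/destination IP addresses
 * and port (see cma_save_ip_info() below, which parses it out of the CM
 * event's private data).
 */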
struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	union ib_gid local_gid;
	__be64 service_id;
	int port;
	bool has_gid;
	u16 pkey;
};

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;
	/*
	 * The FSM uses a funny double locking where state is protected by both
	 * the handler_mutex and the spinlock. State is not allowed to change
	 * to/from a handler_mutex protected value without also holding
	 * handler_mutex.
	 */
	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
		lockdep_assert_held(&id_priv->handler_mutex);
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;
	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_dev_get(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);
	trace_cm_id_attach(id_priv, cma_dev->device);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del(&id_priv->list);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
	}
	mutex_unlock(&lock);
}

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}
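
/*
 * Set the QKEY used for UD-style port spaces: honor a caller-supplied value,
 * default to RDMA_UDP_QKEY for RDMA_PS_UDP/RDMA_PS_IB, and look up the
 * multicast group's QKEY for RDMA_PS_IPOIB.
 */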
static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;
	if (id_priv->qkey) {
		if (qkey && id_priv->qkey != qkey)
			return -EINVAL;
		return 0;
	}
	if (qkey) {
		id_priv->qkey = qkey;
		return 0;
	}
	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;
	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}
	return ret;
}
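
/*
 * Check that the (device, port, GID) combination is usable for this cm_id,
 * taking the bound net_device and network namespace into account, and return
 * the matching GID table attribute on success.
 */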
static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u8 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int bound_if_index = dev_addr->bound_dev_if;
	const struct ib_gid_attr *sgid_attr;
	int dev_type = dev_addr->dev_type;
	struct net_device *ndev = NULL;
	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		return ERR_PTR(-ENODEV);
	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return ERR_PTR(-ENODEV);
	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return ERR_PTR(-ENODEV);
	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
		if (!ndev)
			return ERR_PTR(-ENODEV);
	} else {
		gid_type = IB_GID_TYPE_IB;
	}
	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	if (ndev)
		dev_put(ndev);
	return sgid_attr;
}

static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}

/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 *   based on source ip address.
 * @id_priv: cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or an error code
 * otherwise. It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	unsigned int port;
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv: cm id to bind to cma device
 * @listen_id_priv: listener cm id to match against
 * @req: Pointer to req structure containing incoming
 *   request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * rdma device matches for listen_id and incoming request. It also verifies
 * that a GID table entry is present for the source address.
 * Returns 0 on success or an error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));
	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);
	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
	 */
	mutex_lock(&lock);
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	rdma_restrack_add(&id_priv->res);
	return 0;
}

static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	unsigned int port;
	union ib_gid gid;
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));
	mutex_lock(&lock);
	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
		id_priv->id.port_num = port;
		cma_bind_sgid_attr(id_priv, sgid_attr);
		ret = 0;
		goto out;
	}
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				ret = 0;
				goto out;
			}
		}
	}
out:
	if (!ret) {
		cma_attach_to_dev(id_priv, cma_dev);
		rdma_restrack_add(&id_priv->res);
	}
	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	unsigned int p;
	u16 pkey, index;
	enum ib_port_state port_state;
	int ret;
	int i;
	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;
			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;
			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
			     ++i) {
				ret = rdma_query_gid(cur_dev->device, p, i,
						     &gid);
				if (ret)
					continue;
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}
				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}
			}
		}
	}
	mutex_unlock(&lock);
	return -ENODEV;
found:
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	mutex_unlock(&lock);
	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_id_get(struct rdma_id_private *id_priv)
{
	refcount_inc(&id_priv->refcount);
}

static void cma_id_put(struct rdma_id_private *id_priv)
{
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}
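
/*
 * Allocate and initialize an rdma_id_private in the RDMA_CM_IDLE state;
 * the kernel and user space ID creation entry points below wrap this.
 */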
  721. static struct rdma_id_private *
  722. __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
  723. void *context, enum rdma_ucm_port_space ps,
  724. enum ib_qp_type qp_type, const struct rdma_id_private *parent)
  725. {
  726. struct rdma_id_private *id_priv;
  727. id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
  728. if (!id_priv)
  729. return ERR_PTR(-ENOMEM);
  730. id_priv->state = RDMA_CM_IDLE;
  731. id_priv->id.context = context;
  732. id_priv->id.event_handler = event_handler;
  733. id_priv->id.ps = ps;
  734. id_priv->id.qp_type = qp_type;
  735. id_priv->tos_set = false;
  736. id_priv->timeout_set = false;
  737. id_priv->gid_type = IB_GID_TYPE_IB;
  738. spin_lock_init(&id_priv->lock);
  739. mutex_init(&id_priv->qp_mutex);
  740. init_completion(&id_priv->comp);
  741. refcount_set(&id_priv->refcount, 1);
  742. mutex_init(&id_priv->handler_mutex);
  743. INIT_LIST_HEAD(&id_priv->listen_list);
  744. INIT_LIST_HEAD(&id_priv->mc_list);
  745. get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
  746. id_priv->id.route.addr.dev_addr.net = get_net(net);
  747. id_priv->seq_num &= 0x00ffffff;
  748. rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
  749. if (parent)
  750. rdma_restrack_parent_name(&id_priv->res, &parent->res);
  751. return id_priv;
  752. }
  753. struct rdma_cm_id *
  754. __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
  755. void *context, enum rdma_ucm_port_space ps,
  756. enum ib_qp_type qp_type, const char *caller)
  757. {
  758. struct rdma_id_private *ret;
  759. ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
  760. if (IS_ERR(ret))
  761. return ERR_CAST(ret);
  762. rdma_restrack_set_name(&ret->res, caller);
  763. return &ret->id;
  764. }
  765. EXPORT_SYMBOL(__rdma_create_kernel_id);
  766. struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
  767. void *context,
  768. enum rdma_ucm_port_space ps,
  769. enum ib_qp_type qp_type)
  770. {
  771. struct rdma_id_private *ret;
  772. ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
  773. ps, qp_type, NULL);
  774. if (IS_ERR(ret))
  775. return ERR_CAST(ret);
  776. rdma_restrack_set_name(&ret->res, NULL);
  777. return &ret->id;
  778. }
  779. EXPORT_SYMBOL(rdma_create_user_id);
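
/* Move a freshly created UD QP through the INIT, RTR and RTS states. */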
  780. static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
  781. {
  782. struct ib_qp_attr qp_attr;
  783. int qp_attr_mask, ret;
  784. qp_attr.qp_state = IB_QPS_INIT;
  785. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  786. if (ret)
  787. return ret;
  788. ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
  789. if (ret)
  790. return ret;
  791. qp_attr.qp_state = IB_QPS_RTR;
  792. ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
  793. if (ret)
  794. return ret;
  795. qp_attr.qp_state = IB_QPS_RTS;
  796. qp_attr.sq_psn = 0;
  797. ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
  798. return ret;
  799. }
  800. static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
  801. {
  802. struct ib_qp_attr qp_attr;
  803. int qp_attr_mask, ret;
  804. qp_attr.qp_state = IB_QPS_INIT;
  805. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  806. if (ret)
  807. return ret;
  808. return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
  809. }
  810. int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
  811. struct ib_qp_init_attr *qp_init_attr)
  812. {
  813. struct rdma_id_private *id_priv;
  814. struct ib_qp *qp;
  815. int ret;
  816. id_priv = container_of(id, struct rdma_id_private, id);
  817. if (id->device != pd->device) {
  818. ret = -EINVAL;
  819. goto out_err;
  820. }
  821. qp_init_attr->port_num = id->port_num;
  822. qp = ib_create_qp(pd, qp_init_attr);
  823. if (IS_ERR(qp)) {
  824. ret = PTR_ERR(qp);
  825. goto out_err;
  826. }
  827. if (id->qp_type == IB_QPT_UD)
  828. ret = cma_init_ud_qp(id_priv, qp);
  829. else
  830. ret = cma_init_conn_qp(id_priv, qp);
  831. if (ret)
  832. goto out_destroy;
  833. id->qp = qp;
  834. id_priv->qp_num = qp->qp_num;
  835. id_priv->srq = (qp->srq != NULL);
  836. trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
  837. return 0;
  838. out_destroy:
  839. ib_destroy_qp(qp);
  840. out_err:
  841. trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
  842. return ret;
  843. }
  844. EXPORT_SYMBOL(rdma_create_qp);
  845. void rdma_destroy_qp(struct rdma_cm_id *id)
  846. {
  847. struct rdma_id_private *id_priv;
  848. id_priv = container_of(id, struct rdma_id_private, id);
  849. trace_cm_qp_destroy(id_priv);
  850. mutex_lock(&id_priv->qp_mutex);
  851. ib_destroy_qp(id_priv->id.qp);
  852. id_priv->id.qp = NULL;
  853. mutex_unlock(&id_priv->qp_mutex);
  854. }
  855. EXPORT_SYMBOL(rdma_destroy_qp);
  856. static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
  857. struct rdma_conn_param *conn_param)
  858. {
  859. struct ib_qp_attr qp_attr;
  860. int qp_attr_mask, ret;
  861. mutex_lock(&id_priv->qp_mutex);
  862. if (!id_priv->id.qp) {
  863. ret = 0;
  864. goto out;
  865. }
  866. /* Need to update QP attributes from default values. */
  867. qp_attr.qp_state = IB_QPS_INIT;
  868. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  869. if (ret)
  870. goto out;
  871. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
  872. if (ret)
  873. goto out;
  874. qp_attr.qp_state = IB_QPS_RTR;
  875. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  876. if (ret)
  877. goto out;
  878. BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
  879. if (conn_param)
  880. qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
  881. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
  882. out:
  883. mutex_unlock(&id_priv->qp_mutex);
  884. return ret;
  885. }
  886. static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
  887. struct rdma_conn_param *conn_param)
  888. {
  889. struct ib_qp_attr qp_attr;
  890. int qp_attr_mask, ret;
  891. mutex_lock(&id_priv->qp_mutex);
  892. if (!id_priv->id.qp) {
  893. ret = 0;
  894. goto out;
  895. }
  896. qp_attr.qp_state = IB_QPS_RTS;
  897. ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
  898. if (ret)
  899. goto out;
  900. if (conn_param)
  901. qp_attr.max_rd_atomic = conn_param->initiator_depth;
  902. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
  903. out:
  904. mutex_unlock(&id_priv->qp_mutex);
  905. return ret;
  906. }
  907. static int cma_modify_qp_err(struct rdma_id_private *id_priv)
  908. {
  909. struct ib_qp_attr qp_attr;
  910. int ret;
  911. mutex_lock(&id_priv->qp_mutex);
  912. if (!id_priv->id.qp) {
  913. ret = 0;
  914. goto out;
  915. }
  916. qp_attr.qp_state = IB_QPS_ERR;
  917. ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
  918. out:
  919. mutex_unlock(&id_priv->qp_mutex);
  920. return ret;
  921. }
  922. static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
  923. struct ib_qp_attr *qp_attr, int *qp_attr_mask)
  924. {
  925. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  926. int ret;
  927. u16 pkey;
  928. if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
  929. pkey = 0xffff;
  930. else
  931. pkey = ib_addr_get_pkey(dev_addr);
  932. ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
  933. pkey, &qp_attr->pkey_index);
  934. if (ret)
  935. return ret;
  936. qp_attr->port_num = id_priv->id.port_num;
  937. *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
  938. if (id_priv->id.qp_type == IB_QPT_UD) {
  939. ret = cma_set_qkey(id_priv, 0);
  940. if (ret)
  941. return ret;
  942. qp_attr->qkey = id_priv->qkey;
  943. *qp_attr_mask |= IB_QP_QKEY;
  944. } else {
  945. qp_attr->qp_access_flags = 0;
  946. *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
  947. }
  948. return 0;
  949. }
  950. int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
  951. int *qp_attr_mask)
  952. {
  953. struct rdma_id_private *id_priv;
  954. int ret = 0;
  955. id_priv = container_of(id, struct rdma_id_private, id);
  956. if (rdma_cap_ib_cm(id->device, id->port_num)) {
  957. if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
  958. ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
  959. else
  960. ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
  961. qp_attr_mask);
  962. if (qp_attr->qp_state == IB_QPS_RTR)
  963. qp_attr->rq_psn = id_priv->seq_num;
  964. } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
  965. if (!id_priv->cm_id.iw) {
  966. qp_attr->qp_access_flags = 0;
  967. *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
  968. } else
  969. ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
  970. qp_attr_mask);
  971. qp_attr->port_num = id_priv->id.port_num;
  972. *qp_attr_mask |= IB_QP_PORT;
  973. } else
  974. ret = -ENOSYS;
  975. if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
  976. qp_attr->timeout = id_priv->timeout;
  977. return ret;
  978. }
  979. EXPORT_SYMBOL(rdma_init_qp_attr);
  980. static inline bool cma_zero_addr(const struct sockaddr *addr)
  981. {
  982. switch (addr->sa_family) {
  983. case AF_INET:
  984. return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
  985. case AF_INET6:
  986. return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
  987. case AF_IB:
  988. return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
  989. default:
  990. return false;
  991. }
  992. }
  993. static inline bool cma_loopback_addr(const struct sockaddr *addr)
  994. {
  995. switch (addr->sa_family) {
  996. case AF_INET:
  997. return ipv4_is_loopback(
  998. ((struct sockaddr_in *)addr)->sin_addr.s_addr);
  999. case AF_INET6:
  1000. return ipv6_addr_loopback(
  1001. &((struct sockaddr_in6 *)addr)->sin6_addr);
  1002. case AF_IB:
  1003. return ib_addr_loopback(
  1004. &((struct sockaddr_ib *)addr)->sib_addr);
  1005. default:
  1006. return false;
  1007. }
  1008. }
  1009. static inline bool cma_any_addr(const struct sockaddr *addr)
  1010. {
  1011. return cma_zero_addr(addr) || cma_loopback_addr(addr);
  1012. }
  1013. static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
  1014. {
  1015. if (src->sa_family != dst->sa_family)
  1016. return -1;
  1017. switch (src->sa_family) {
  1018. case AF_INET:
  1019. return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
  1020. ((struct sockaddr_in *)dst)->sin_addr.s_addr;
  1021. case AF_INET6: {
  1022. struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
  1023. struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
  1024. bool link_local;
  1025. if (ipv6_addr_cmp(&src_addr6->sin6_addr,
  1026. &dst_addr6->sin6_addr))
  1027. return 1;
  1028. link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
  1029. IPV6_ADDR_LINKLOCAL;
  1030. /* Link local must match their scope_ids */
  1031. return link_local ? (src_addr6->sin6_scope_id !=
  1032. dst_addr6->sin6_scope_id) :
  1033. 0;
  1034. }
  1035. default:
  1036. return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
  1037. &((struct sockaddr_ib *) dst)->sib_addr);
  1038. }
  1039. }
  1040. static __be16 cma_port(const struct sockaddr *addr)
  1041. {
  1042. struct sockaddr_ib *sib;
  1043. switch (addr->sa_family) {
  1044. case AF_INET:
  1045. return ((struct sockaddr_in *) addr)->sin_port;
  1046. case AF_INET6:
  1047. return ((struct sockaddr_in6 *) addr)->sin6_port;
  1048. case AF_IB:
  1049. sib = (struct sockaddr_ib *) addr;
  1050. return htons((u16) (be64_to_cpu(sib->sib_sid) &
  1051. be64_to_cpu(sib->sib_sid_mask)));
  1052. default:
  1053. return 0;
  1054. }
  1055. }
  1056. static inline int cma_any_port(const struct sockaddr *addr)
  1057. {
  1058. return !cma_port(addr);
  1059. }
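/*
 * Fill in the AF_IB source/destination addresses for a new connection,
 * taking the GIDs, P_Key and service ID from the path record when one is
 * supplied; without a path record the source is copied from the
 * listening ID.
 */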
  1060. static void cma_save_ib_info(struct sockaddr *src_addr,
  1061. struct sockaddr *dst_addr,
  1062. const struct rdma_cm_id *listen_id,
  1063. const struct sa_path_rec *path)
  1064. {
  1065. struct sockaddr_ib *listen_ib, *ib;
  1066. listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
  1067. if (src_addr) {
  1068. ib = (struct sockaddr_ib *)src_addr;
  1069. ib->sib_family = AF_IB;
  1070. if (path) {
  1071. ib->sib_pkey = path->pkey;
  1072. ib->sib_flowinfo = path->flow_label;
  1073. memcpy(&ib->sib_addr, &path->sgid, 16);
  1074. ib->sib_sid = path->service_id;
  1075. ib->sib_scope_id = 0;
  1076. } else {
  1077. ib->sib_pkey = listen_ib->sib_pkey;
  1078. ib->sib_flowinfo = listen_ib->sib_flowinfo;
  1079. ib->sib_addr = listen_ib->sib_addr;
  1080. ib->sib_sid = listen_ib->sib_sid;
  1081. ib->sib_scope_id = listen_ib->sib_scope_id;
  1082. }
  1083. ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
  1084. }
  1085. if (dst_addr) {
  1086. ib = (struct sockaddr_ib *)dst_addr;
  1087. ib->sib_family = AF_IB;
  1088. if (path) {
  1089. ib->sib_pkey = path->pkey;
  1090. ib->sib_flowinfo = path->flow_label;
  1091. memcpy(&ib->sib_addr, &path->dgid, 16);
  1092. }
  1093. }
  1094. }
  1095. static void cma_save_ip4_info(struct sockaddr_in *src_addr,
  1096. struct sockaddr_in *dst_addr,
  1097. struct cma_hdr *hdr,
  1098. __be16 local_port)
  1099. {
  1100. if (src_addr) {
  1101. *src_addr = (struct sockaddr_in) {
  1102. .sin_family = AF_INET,
  1103. .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
  1104. .sin_port = local_port,
  1105. };
  1106. }
  1107. if (dst_addr) {
  1108. *dst_addr = (struct sockaddr_in) {
  1109. .sin_family = AF_INET,
  1110. .sin_addr.s_addr = hdr->src_addr.ip4.addr,
  1111. .sin_port = hdr->port,
  1112. };
  1113. }
  1114. }
  1115. static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
  1116. struct sockaddr_in6 *dst_addr,
  1117. struct cma_hdr *hdr,
  1118. __be16 local_port)
  1119. {
  1120. if (src_addr) {
  1121. *src_addr = (struct sockaddr_in6) {
  1122. .sin6_family = AF_INET6,
  1123. .sin6_addr = hdr->dst_addr.ip6,
  1124. .sin6_port = local_port,
  1125. };
  1126. }
  1127. if (dst_addr) {
  1128. *dst_addr = (struct sockaddr_in6) {
  1129. .sin6_family = AF_INET6,
  1130. .sin6_addr = hdr->src_addr.ip6,
  1131. .sin6_port = hdr->port,
  1132. };
  1133. }
  1134. }
  1135. static u16 cma_port_from_service_id(__be64 service_id)
  1136. {
  1137. return (u16)be64_to_cpu(service_id);
  1138. }
  1139. static int cma_save_ip_info(struct sockaddr *src_addr,
  1140. struct sockaddr *dst_addr,
  1141. const struct ib_cm_event *ib_event,
  1142. __be64 service_id)
  1143. {
  1144. struct cma_hdr *hdr;
  1145. __be16 port;
  1146. hdr = ib_event->private_data;
  1147. if (hdr->cma_version != CMA_VERSION)
  1148. return -EINVAL;
  1149. port = htons(cma_port_from_service_id(service_id));
  1150. switch (cma_get_ip_ver(hdr)) {
  1151. case 4:
  1152. cma_save_ip4_info((struct sockaddr_in *)src_addr,
  1153. (struct sockaddr_in *)dst_addr, hdr, port);
  1154. break;
  1155. case 6:
  1156. cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
  1157. (struct sockaddr_in6 *)dst_addr, hdr, port);
  1158. break;
  1159. default:
  1160. return -EAFNOSUPPORT;
  1161. }
  1162. return 0;
  1163. }
  1164. static int cma_save_net_info(struct sockaddr *src_addr,
  1165. struct sockaddr *dst_addr,
  1166. const struct rdma_cm_id *listen_id,
  1167. const struct ib_cm_event *ib_event,
  1168. sa_family_t sa_family, __be64 service_id)
  1169. {
  1170. if (sa_family == AF_IB) {
  1171. if (ib_event->event == IB_CM_REQ_RECEIVED)
  1172. cma_save_ib_info(src_addr, dst_addr, listen_id,
  1173. ib_event->param.req_rcvd.primary_path);
  1174. else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
  1175. cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
  1176. return 0;
  1177. }
  1178. return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
  1179. }
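/*
 * Capture the device, port, GID, P_Key and service ID of an incoming REQ
 * or SIDR REQ so that the listener lookup can be performed later. A
 * mismatch between the BTH P_Key and the request P_Key is only warned
 * about for now.
 */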
  1180. static int cma_save_req_info(const struct ib_cm_event *ib_event,
  1181. struct cma_req_info *req)
  1182. {
  1183. const struct ib_cm_req_event_param *req_param =
  1184. &ib_event->param.req_rcvd;
  1185. const struct ib_cm_sidr_req_event_param *sidr_param =
  1186. &ib_event->param.sidr_req_rcvd;
  1187. switch (ib_event->event) {
  1188. case IB_CM_REQ_RECEIVED:
  1189. req->device = req_param->listen_id->device;
  1190. req->port = req_param->port;
  1191. memcpy(&req->local_gid, &req_param->primary_path->sgid,
  1192. sizeof(req->local_gid));
  1193. req->has_gid = true;
  1194. req->service_id = req_param->primary_path->service_id;
  1195. req->pkey = be16_to_cpu(req_param->primary_path->pkey);
  1196. if (req->pkey != req_param->bth_pkey)
  1197. pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
  1198. "RDMA CMA: in the future this may cause the request to be dropped\n",
  1199. req_param->bth_pkey, req->pkey);
  1200. break;
  1201. case IB_CM_SIDR_REQ_RECEIVED:
  1202. req->device = sidr_param->listen_id->device;
  1203. req->port = sidr_param->port;
  1204. req->has_gid = false;
  1205. req->service_id = sidr_param->service_id;
  1206. req->pkey = sidr_param->pkey;
  1207. if (req->pkey != sidr_param->bth_pkey)
  1208. pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
  1209. "RDMA CMA: in the future this may cause the request to be dropped\n",
  1210. sidr_param->bth_pkey, req->pkey);
  1211. break;
  1212. default:
  1213. return -EINVAL;
  1214. }
  1215. return 0;
  1216. }
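/*
 * Verify that an incoming IPv4 request is plausible for this netdevice:
 * reject multicast, broadcast, zeronet and loopback addresses, then use a
 * FIB lookup scoped to the device to confirm the route resolves to it.
 */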
  1217. static bool validate_ipv4_net_dev(struct net_device *net_dev,
  1218. const struct sockaddr_in *dst_addr,
  1219. const struct sockaddr_in *src_addr)
  1220. {
  1221. __be32 daddr = dst_addr->sin_addr.s_addr,
  1222. saddr = src_addr->sin_addr.s_addr;
  1223. struct fib_result res;
  1224. struct flowi4 fl4;
  1225. int err;
  1226. bool ret;
  1227. if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
  1228. ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
  1229. ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
  1230. ipv4_is_loopback(saddr))
  1231. return false;
  1232. memset(&fl4, 0, sizeof(fl4));
  1233. fl4.flowi4_iif = net_dev->ifindex;
  1234. fl4.daddr = daddr;
  1235. fl4.saddr = saddr;
  1236. rcu_read_lock();
  1237. err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
  1238. ret = err == 0 && FIB_RES_DEV(res) == net_dev;
  1239. rcu_read_unlock();
  1240. return ret;
  1241. }
  1242. static bool validate_ipv6_net_dev(struct net_device *net_dev,
  1243. const struct sockaddr_in6 *dst_addr,
  1244. const struct sockaddr_in6 *src_addr)
  1245. {
  1246. #if IS_ENABLED(CONFIG_IPV6)
  1247. const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
  1248. IPV6_ADDR_LINKLOCAL;
  1249. struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
  1250. &src_addr->sin6_addr, net_dev->ifindex,
  1251. NULL, strict);
  1252. bool ret;
  1253. if (!rt)
  1254. return false;
  1255. ret = rt->rt6i_idev->dev == net_dev;
  1256. ip6_rt_put(rt);
  1257. return ret;
  1258. #else
  1259. return false;
  1260. #endif
  1261. }
  1262. static bool validate_net_dev(struct net_device *net_dev,
  1263. const struct sockaddr *daddr,
  1264. const struct sockaddr *saddr)
  1265. {
  1266. const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
  1267. const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
  1268. const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
  1269. const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
  1270. switch (daddr->sa_family) {
  1271. case AF_INET:
  1272. return saddr->sa_family == AF_INET &&
  1273. validate_ipv4_net_dev(net_dev, daddr4, saddr4);
  1274. case AF_INET6:
  1275. return saddr->sa_family == AF_INET6 &&
  1276. validate_ipv6_net_dev(net_dev, daddr6, saddr6);
  1277. default:
  1278. return false;
  1279. }
  1280. }
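/*
 * For RoCE, derive the ingress netdevice from the SGID attribute carried
 * in the CM REQ or SIDR REQ event.
 */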
  1281. static struct net_device *
  1282. roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
  1283. {
  1284. const struct ib_gid_attr *sgid_attr = NULL;
  1285. struct net_device *ndev;
  1286. if (ib_event->event == IB_CM_REQ_RECEIVED)
  1287. sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
  1288. else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
  1289. sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
  1290. if (!sgid_attr)
  1291. return NULL;
  1292. rcu_read_lock();
  1293. ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
  1294. if (IS_ERR(ndev))
  1295. ndev = NULL;
  1296. else
  1297. dev_hold(ndev);
  1298. rcu_read_unlock();
  1299. return ndev;
  1300. }
  1301. static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
  1302. struct cma_req_info *req)
  1303. {
  1304. struct sockaddr *listen_addr =
  1305. (struct sockaddr *)&req->listen_addr_storage;
  1306. struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
  1307. struct net_device *net_dev;
  1308. const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
  1309. int err;
  1310. err = cma_save_ip_info(listen_addr, src_addr, ib_event,
  1311. req->service_id);
  1312. if (err)
  1313. return ERR_PTR(err);
  1314. if (rdma_protocol_roce(req->device, req->port))
  1315. net_dev = roce_get_net_dev_by_cm_event(ib_event);
  1316. else
  1317. net_dev = ib_get_net_dev_by_params(req->device, req->port,
  1318. req->pkey,
  1319. gid, listen_addr);
  1320. if (!net_dev)
  1321. return ERR_PTR(-ENODEV);
  1322. return net_dev;
  1323. }
  1324. static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
  1325. {
  1326. return (be64_to_cpu(service_id) >> 16) & 0xffff;
  1327. }
  1328. static bool cma_match_private_data(struct rdma_id_private *id_priv,
  1329. const struct cma_hdr *hdr)
  1330. {
  1331. struct sockaddr *addr = cma_src_addr(id_priv);
  1332. __be32 ip4_addr;
  1333. struct in6_addr ip6_addr;
  1334. if (cma_any_addr(addr) && !id_priv->afonly)
  1335. return true;
  1336. switch (addr->sa_family) {
  1337. case AF_INET:
  1338. ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
  1339. if (cma_get_ip_ver(hdr) != 4)
  1340. return false;
  1341. if (!cma_any_addr(addr) &&
  1342. hdr->dst_addr.ip4.addr != ip4_addr)
  1343. return false;
  1344. break;
  1345. case AF_INET6:
  1346. ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
  1347. if (cma_get_ip_ver(hdr) != 6)
  1348. return false;
  1349. if (!cma_any_addr(addr) &&
  1350. memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
  1351. return false;
  1352. break;
  1353. case AF_IB:
  1354. return true;
  1355. default:
  1356. return false;
  1357. }
  1358. return true;
  1359. }
  1360. static bool cma_protocol_roce(const struct rdma_cm_id *id)
  1361. {
  1362. struct ib_device *device = id->device;
  1363. const int port_num = id->port_num ?: rdma_start_port(device);
  1364. return rdma_protocol_roce(device, port_num);
  1365. }
  1366. static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
  1367. {
  1368. const struct sockaddr *daddr =
  1369. (const struct sockaddr *)&req->listen_addr_storage;
  1370. const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
  1371. /* Returns true if the req is for IPv6 link local */
  1372. return (daddr->sa_family == AF_INET6 &&
  1373. (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
  1374. }
  1375. static bool cma_match_net_dev(const struct rdma_cm_id *id,
  1376. const struct net_device *net_dev,
  1377. const struct cma_req_info *req)
  1378. {
  1379. const struct rdma_addr *addr = &id->route.addr;
  1380. if (!net_dev)
  1381. /* This request is an AF_IB request */
  1382. return (!id->port_num || id->port_num == req->port) &&
  1383. (addr->src_addr.ss_family == AF_IB);
  1384. /*
1385. * If the request is not for an IPv6 link-local address, allow the
1386. * request to match any netdevice of the single- or multi-port rdma device.
  1387. */
  1388. if (!cma_is_req_ipv6_ll(req))
  1389. return true;
  1390. /*
1391. * Net namespaces must match, and if the listener is listening
1392. * on a specific netdevice then the netdevice must match as well.
  1393. */
  1394. if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
  1395. (!!addr->dev_addr.bound_dev_if ==
  1396. (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
  1397. return true;
  1398. else
  1399. return false;
  1400. }
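/*
 * Search a bind list for a listener matching the request: the private
 * data must match, and either the listener itself or one of its
 * per-device children must match the requesting device and netdevice.
 */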
  1401. static struct rdma_id_private *cma_find_listener(
  1402. const struct rdma_bind_list *bind_list,
  1403. const struct ib_cm_id *cm_id,
  1404. const struct ib_cm_event *ib_event,
  1405. const struct cma_req_info *req,
  1406. const struct net_device *net_dev)
  1407. {
  1408. struct rdma_id_private *id_priv, *id_priv_dev;
  1409. lockdep_assert_held(&lock);
  1410. if (!bind_list)
  1411. return ERR_PTR(-EINVAL);
  1412. hlist_for_each_entry(id_priv, &bind_list->owners, node) {
  1413. if (cma_match_private_data(id_priv, ib_event->private_data)) {
  1414. if (id_priv->id.device == cm_id->device &&
  1415. cma_match_net_dev(&id_priv->id, net_dev, req))
  1416. return id_priv;
  1417. list_for_each_entry(id_priv_dev,
  1418. &id_priv->listen_list,
  1419. listen_list) {
  1420. if (id_priv_dev->id.device == cm_id->device &&
  1421. cma_match_net_dev(&id_priv_dev->id,
  1422. net_dev, req))
  1423. return id_priv_dev;
  1424. }
  1425. }
  1426. }
  1427. return ERR_PTR(-EINVAL);
  1428. }
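/*
 * Map an incoming IB CM event to the rdma_id_private listening for it:
 * save the request info, resolve and validate the ingress netdevice, then
 * look up the listener in the matching bind list under the lock.
 */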
  1429. static struct rdma_id_private *
  1430. cma_ib_id_from_event(struct ib_cm_id *cm_id,
  1431. const struct ib_cm_event *ib_event,
  1432. struct cma_req_info *req,
  1433. struct net_device **net_dev)
  1434. {
  1435. struct rdma_bind_list *bind_list;
  1436. struct rdma_id_private *id_priv;
  1437. int err;
  1438. err = cma_save_req_info(ib_event, req);
  1439. if (err)
  1440. return ERR_PTR(err);
  1441. *net_dev = cma_get_net_dev(ib_event, req);
  1442. if (IS_ERR(*net_dev)) {
  1443. if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
  1444. /* Assuming the protocol is AF_IB */
  1445. *net_dev = NULL;
  1446. } else {
  1447. return ERR_CAST(*net_dev);
  1448. }
  1449. }
  1450. mutex_lock(&lock);
  1451. /*
1452. * The net namespace might be deleted while the route lookup and
1453. * cm_id lookup are in progress. Therefore, perform the netdevice
1454. * validation and cm_id lookup under the RCU read lock.
1455. * The RCU lock, together with the netdevice state check, synchronizes
1456. * with a netdevice migrating to a different net namespace and also
1457. * ensures that the net namespace is not deleted while the lookup is
1458. * in progress.
1459. * If the device state is not IFF_UP, its properties such as ifindex
1460. * and nd_net cannot be trusted to remain valid without the rcu lock.
1461. * net/core/dev.c change_net_namespace() synchronizes with ongoing
1462. * operations on the net device, after the device is closed, using
1463. * synchronize_net().
  1464. */
  1465. rcu_read_lock();
  1466. if (*net_dev) {
  1467. /*
1468. * If the netdevice is down, it is likely administratively down or
1469. * migrating to a different namespace.
1470. * In that case avoid further processing, as the net namespace
1471. * or ifindex may change.
  1472. */
  1473. if (((*net_dev)->flags & IFF_UP) == 0) {
  1474. id_priv = ERR_PTR(-EHOSTUNREACH);
  1475. goto err;
  1476. }
  1477. if (!validate_net_dev(*net_dev,
  1478. (struct sockaddr *)&req->listen_addr_storage,
  1479. (struct sockaddr *)&req->src_addr_storage)) {
  1480. id_priv = ERR_PTR(-EHOSTUNREACH);
  1481. goto err;
  1482. }
  1483. }
  1484. bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
  1485. rdma_ps_from_service_id(req->service_id),
  1486. cma_port_from_service_id(req->service_id));
  1487. id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
  1488. err:
  1489. rcu_read_unlock();
  1490. mutex_unlock(&lock);
  1491. if (IS_ERR(id_priv) && *net_dev) {
  1492. dev_put(*net_dev);
  1493. *net_dev = NULL;
  1494. }
  1495. return id_priv;
  1496. }
  1497. static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
  1498. {
  1499. return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
  1500. }
  1501. static void cma_cancel_route(struct rdma_id_private *id_priv)
  1502. {
  1503. if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
  1504. if (id_priv->query)
  1505. ib_sa_cancel_query(id_priv->query_id, id_priv->query);
  1506. }
  1507. }
  1508. static void _cma_cancel_listens(struct rdma_id_private *id_priv)
  1509. {
  1510. struct rdma_id_private *dev_id_priv;
  1511. lockdep_assert_held(&lock);
  1512. /*
  1513. * Remove from listen_any_list to prevent added devices from spawning
  1514. * additional listen requests.
  1515. */
  1516. list_del(&id_priv->list);
  1517. while (!list_empty(&id_priv->listen_list)) {
  1518. dev_id_priv = list_entry(id_priv->listen_list.next,
  1519. struct rdma_id_private, listen_list);
  1520. /* sync with device removal to avoid duplicate destruction */
  1521. list_del_init(&dev_id_priv->list);
  1522. list_del(&dev_id_priv->listen_list);
  1523. mutex_unlock(&lock);
  1524. rdma_destroy_id(&dev_id_priv->id);
  1525. mutex_lock(&lock);
  1526. }
  1527. }
  1528. static void cma_cancel_listens(struct rdma_id_private *id_priv)
  1529. {
  1530. mutex_lock(&lock);
  1531. _cma_cancel_listens(id_priv);
  1532. mutex_unlock(&lock);
  1533. }
  1534. static void cma_cancel_operation(struct rdma_id_private *id_priv,
  1535. enum rdma_cm_state state)
  1536. {
  1537. switch (state) {
  1538. case RDMA_CM_ADDR_QUERY:
  1539. rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
  1540. break;
  1541. case RDMA_CM_ROUTE_QUERY:
  1542. cma_cancel_route(id_priv);
  1543. break;
  1544. case RDMA_CM_LISTEN:
  1545. if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
  1546. cma_cancel_listens(id_priv);
  1547. break;
  1548. default:
  1549. break;
  1550. }
  1551. }
  1552. static void cma_release_port(struct rdma_id_private *id_priv)
  1553. {
  1554. struct rdma_bind_list *bind_list = id_priv->bind_list;
  1555. struct net *net = id_priv->id.route.addr.dev_addr.net;
  1556. if (!bind_list)
  1557. return;
  1558. mutex_lock(&lock);
  1559. hlist_del(&id_priv->node);
  1560. if (hlist_empty(&bind_list->owners)) {
  1561. cma_ps_remove(net, bind_list->ps, bind_list->port);
  1562. kfree(bind_list);
  1563. }
  1564. mutex_unlock(&lock);
  1565. }
  1566. static void destroy_mc(struct rdma_id_private *id_priv,
  1567. struct cma_multicast *mc)
  1568. {
  1569. bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
  1570. if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
  1571. ib_sa_free_multicast(mc->sa_mc);
  1572. if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
  1573. struct rdma_dev_addr *dev_addr =
  1574. &id_priv->id.route.addr.dev_addr;
  1575. struct net_device *ndev = NULL;
  1576. if (dev_addr->bound_dev_if)
  1577. ndev = dev_get_by_index(dev_addr->net,
  1578. dev_addr->bound_dev_if);
  1579. if (ndev && !send_only) {
  1580. enum ib_gid_type gid_type;
  1581. union ib_gid mgid;
  1582. gid_type = id_priv->cma_dev->default_gid_type
  1583. [id_priv->id.port_num -
  1584. rdma_start_port(
  1585. id_priv->cma_dev->device)];
  1586. cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
  1587. gid_type);
  1588. cma_igmp_send(ndev, &mgid, false);
  1589. }
  1590. dev_put(ndev);
  1591. cancel_work_sync(&mc->iboe_join.work);
  1592. }
  1593. kfree(mc);
  1594. }
  1595. static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
  1596. {
  1597. struct cma_multicast *mc;
  1598. while (!list_empty(&id_priv->mc_list)) {
  1599. mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
  1600. list);
  1601. list_del(&mc->list);
  1602. destroy_mc(id_priv, mc);
  1603. }
  1604. }
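/*
 * Final teardown of an rdma_id_private: cancel any outstanding operation,
 * destroy the underlying IB/iWARP CM id, leave multicast groups, release
 * the device and port, and free the structure once the last reference is
 * dropped.
 */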
  1605. static void _destroy_id(struct rdma_id_private *id_priv,
  1606. enum rdma_cm_state state)
  1607. {
  1608. cma_cancel_operation(id_priv, state);
  1609. rdma_restrack_del(&id_priv->res);
  1610. if (id_priv->cma_dev) {
  1611. if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
  1612. if (id_priv->cm_id.ib)
  1613. ib_destroy_cm_id(id_priv->cm_id.ib);
  1614. } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
  1615. if (id_priv->cm_id.iw)
  1616. iw_destroy_cm_id(id_priv->cm_id.iw);
  1617. }
  1618. cma_leave_mc_groups(id_priv);
  1619. cma_release_dev(id_priv);
  1620. }
  1621. cma_release_port(id_priv);
  1622. cma_id_put(id_priv);
  1623. wait_for_completion(&id_priv->comp);
  1624. if (id_priv->internal_id)
  1625. cma_id_put(id_priv->id.context);
  1626. kfree(id_priv->id.route.path_rec);
  1627. put_net(id_priv->id.route.addr.dev_addr.net);
  1628. kfree(id_priv);
  1629. }
  1630. /*
  1631. * destroy an ID from within the handler_mutex. This ensures that no other
  1632. * handlers can start running concurrently.
  1633. */
  1634. static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
1635. __releases(&id_priv->handler_mutex)
  1636. {
  1637. enum rdma_cm_state state;
  1638. unsigned long flags;
  1639. trace_cm_id_destroy(id_priv);
  1640. /*
  1641. * Setting the state to destroyed under the handler mutex provides a
  1642. * fence against calling handler callbacks. If this is invoked due to
1643. * the failure of a handler callback then it guarantees that no future
  1644. * handlers will be called.
  1645. */
  1646. lockdep_assert_held(&id_priv->handler_mutex);
  1647. spin_lock_irqsave(&id_priv->lock, flags);
  1648. state = id_priv->state;
  1649. id_priv->state = RDMA_CM_DESTROYING;
  1650. spin_unlock_irqrestore(&id_priv->lock, flags);
  1651. mutex_unlock(&id_priv->handler_mutex);
  1652. _destroy_id(id_priv, state);
  1653. }
  1654. void rdma_destroy_id(struct rdma_cm_id *id)
  1655. {
  1656. struct rdma_id_private *id_priv =
  1657. container_of(id, struct rdma_id_private, id);
  1658. mutex_lock(&id_priv->handler_mutex);
  1659. destroy_id_handler_unlock(id_priv);
  1660. }
  1661. EXPORT_SYMBOL(rdma_destroy_id);
  1662. static int cma_rep_recv(struct rdma_id_private *id_priv)
  1663. {
  1664. int ret;
  1665. ret = cma_modify_qp_rtr(id_priv, NULL);
  1666. if (ret)
  1667. goto reject;
  1668. ret = cma_modify_qp_rts(id_priv, NULL);
  1669. if (ret)
  1670. goto reject;
  1671. trace_cm_send_rtu(id_priv);
  1672. ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
  1673. if (ret)
  1674. goto reject;
  1675. return 0;
  1676. reject:
  1677. pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
  1678. cma_modify_qp_err(id_priv);
  1679. trace_cm_send_rej(id_priv);
  1680. ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
  1681. NULL, 0, NULL, 0);
  1682. return ret;
  1683. }
  1684. static void cma_set_rep_event_data(struct rdma_cm_event *event,
  1685. const struct ib_cm_rep_event_param *rep_data,
  1686. void *private_data)
  1687. {
  1688. event->param.conn.private_data = private_data;
  1689. event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
  1690. event->param.conn.responder_resources = rep_data->responder_resources;
  1691. event->param.conn.initiator_depth = rep_data->initiator_depth;
  1692. event->param.conn.flow_control = rep_data->flow_control;
  1693. event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
  1694. event->param.conn.srq = rep_data->srq;
  1695. event->param.conn.qp_num = rep_data->remote_qpn;
  1696. event->ece.vendor_id = rep_data->ece.vendor_id;
  1697. event->ece.attr_mod = rep_data->ece.attr_mod;
  1698. }
  1699. static int cma_cm_event_handler(struct rdma_id_private *id_priv,
  1700. struct rdma_cm_event *event)
  1701. {
  1702. int ret;
  1703. lockdep_assert_held(&id_priv->handler_mutex);
  1704. trace_cm_event_handler(id_priv, event);
  1705. ret = id_priv->id.event_handler(&id_priv->id, event);
  1706. trace_cm_event_done(id_priv, event, ret);
  1707. return ret;
  1708. }
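/*
 * IB CM callback for a connecting or connected id: translate ib_cm events
 * (REP, RTU, DREQ, REJ, timewait exit, ...) into rdma_cm events and pass
 * them to the user's event handler. A non-zero return from the handler
 * destroys the id.
 */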
  1709. static int cma_ib_handler(struct ib_cm_id *cm_id,
  1710. const struct ib_cm_event *ib_event)
  1711. {
  1712. struct rdma_id_private *id_priv = cm_id->context;
  1713. struct rdma_cm_event event = {};
  1714. enum rdma_cm_state state;
  1715. int ret;
  1716. mutex_lock(&id_priv->handler_mutex);
  1717. state = READ_ONCE(id_priv->state);
  1718. if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
  1719. state != RDMA_CM_CONNECT) ||
  1720. (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
  1721. state != RDMA_CM_DISCONNECT))
  1722. goto out;
  1723. switch (ib_event->event) {
  1724. case IB_CM_REQ_ERROR:
  1725. case IB_CM_REP_ERROR:
  1726. event.event = RDMA_CM_EVENT_UNREACHABLE;
  1727. event.status = -ETIMEDOUT;
  1728. break;
  1729. case IB_CM_REP_RECEIVED:
  1730. if (state == RDMA_CM_CONNECT &&
  1731. (id_priv->id.qp_type != IB_QPT_UD)) {
  1732. trace_cm_send_mra(id_priv);
  1733. ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
  1734. }
  1735. if (id_priv->id.qp) {
  1736. event.status = cma_rep_recv(id_priv);
  1737. event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
  1738. RDMA_CM_EVENT_ESTABLISHED;
  1739. } else {
  1740. event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
  1741. }
  1742. cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
  1743. ib_event->private_data);
  1744. break;
  1745. case IB_CM_RTU_RECEIVED:
  1746. case IB_CM_USER_ESTABLISHED:
  1747. event.event = RDMA_CM_EVENT_ESTABLISHED;
  1748. break;
  1749. case IB_CM_DREQ_ERROR:
  1750. event.status = -ETIMEDOUT;
  1751. fallthrough;
  1752. case IB_CM_DREQ_RECEIVED:
  1753. case IB_CM_DREP_RECEIVED:
  1754. if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
  1755. RDMA_CM_DISCONNECT))
  1756. goto out;
  1757. event.event = RDMA_CM_EVENT_DISCONNECTED;
  1758. break;
  1759. case IB_CM_TIMEWAIT_EXIT:
  1760. event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
  1761. break;
  1762. case IB_CM_MRA_RECEIVED:
  1763. /* ignore event */
  1764. goto out;
  1765. case IB_CM_REJ_RECEIVED:
  1766. pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
  1767. ib_event->param.rej_rcvd.reason));
  1768. cma_modify_qp_err(id_priv);
  1769. event.status = ib_event->param.rej_rcvd.reason;
  1770. event.event = RDMA_CM_EVENT_REJECTED;
  1771. event.param.conn.private_data = ib_event->private_data;
  1772. event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
  1773. break;
  1774. default:
  1775. pr_err("RDMA CMA: unexpected IB CM event: %d\n",
  1776. ib_event->event);
  1777. goto out;
  1778. }
  1779. ret = cma_cm_event_handler(id_priv, &event);
  1780. if (ret) {
  1781. /* Destroy the CM ID by returning a non-zero value. */
  1782. id_priv->cm_id.ib = NULL;
  1783. destroy_id_handler_unlock(id_priv);
  1784. return ret;
  1785. }
  1786. out:
  1787. mutex_unlock(&id_priv->handler_mutex);
  1788. return 0;
  1789. }
  1790. static struct rdma_id_private *
  1791. cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
  1792. const struct ib_cm_event *ib_event,
  1793. struct net_device *net_dev)
  1794. {
  1795. struct rdma_id_private *listen_id_priv;
  1796. struct rdma_id_private *id_priv;
  1797. struct rdma_cm_id *id;
  1798. struct rdma_route *rt;
  1799. const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
  1800. struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
  1801. const __be64 service_id =
  1802. ib_event->param.req_rcvd.primary_path->service_id;
  1803. int ret;
  1804. listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
  1805. id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
  1806. listen_id->event_handler, listen_id->context,
  1807. listen_id->ps,
  1808. ib_event->param.req_rcvd.qp_type,
  1809. listen_id_priv);
  1810. if (IS_ERR(id_priv))
  1811. return NULL;
  1812. id = &id_priv->id;
  1813. if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
  1814. (struct sockaddr *)&id->route.addr.dst_addr,
  1815. listen_id, ib_event, ss_family, service_id))
  1816. goto err;
  1817. rt = &id->route;
  1818. rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
  1819. rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
  1820. GFP_KERNEL);
  1821. if (!rt->path_rec)
  1822. goto err;
  1823. rt->path_rec[0] = *path;
  1824. if (rt->num_paths == 2)
  1825. rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
  1826. if (net_dev) {
  1827. rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
  1828. } else {
  1829. if (!cma_protocol_roce(listen_id) &&
  1830. cma_any_addr(cma_src_addr(id_priv))) {
  1831. rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
  1832. rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
  1833. ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
  1834. } else if (!cma_any_addr(cma_src_addr(id_priv))) {
  1835. ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
  1836. if (ret)
  1837. goto err;
  1838. }
  1839. }
  1840. rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
  1841. id_priv->state = RDMA_CM_CONNECT;
  1842. return id_priv;
  1843. err:
  1844. rdma_destroy_id(id);
  1845. return NULL;
  1846. }
  1847. static struct rdma_id_private *
  1848. cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
  1849. const struct ib_cm_event *ib_event,
  1850. struct net_device *net_dev)
  1851. {
  1852. const struct rdma_id_private *listen_id_priv;
  1853. struct rdma_id_private *id_priv;
  1854. struct rdma_cm_id *id;
  1855. const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
  1856. struct net *net = listen_id->route.addr.dev_addr.net;
  1857. int ret;
  1858. listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
  1859. id_priv = __rdma_create_id(net, listen_id->event_handler,
  1860. listen_id->context, listen_id->ps, IB_QPT_UD,
  1861. listen_id_priv);
  1862. if (IS_ERR(id_priv))
  1863. return NULL;
  1864. id = &id_priv->id;
  1865. if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
  1866. (struct sockaddr *)&id->route.addr.dst_addr,
  1867. listen_id, ib_event, ss_family,
  1868. ib_event->param.sidr_req_rcvd.service_id))
  1869. goto err;
  1870. if (net_dev) {
  1871. rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
  1872. } else {
  1873. if (!cma_any_addr(cma_src_addr(id_priv))) {
  1874. ret = cma_translate_addr(cma_src_addr(id_priv),
  1875. &id->route.addr.dev_addr);
  1876. if (ret)
  1877. goto err;
  1878. }
  1879. }
  1880. id_priv->state = RDMA_CM_CONNECT;
  1881. return id_priv;
  1882. err:
  1883. rdma_destroy_id(id);
  1884. return NULL;
  1885. }
  1886. static void cma_set_req_event_data(struct rdma_cm_event *event,
  1887. const struct ib_cm_req_event_param *req_data,
  1888. void *private_data, int offset)
  1889. {
  1890. event->param.conn.private_data = private_data + offset;
  1891. event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
  1892. event->param.conn.responder_resources = req_data->responder_resources;
  1893. event->param.conn.initiator_depth = req_data->initiator_depth;
  1894. event->param.conn.flow_control = req_data->flow_control;
  1895. event->param.conn.retry_count = req_data->retry_count;
  1896. event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
  1897. event->param.conn.srq = req_data->srq;
  1898. event->param.conn.qp_num = req_data->remote_qpn;
  1899. event->ece.vendor_id = req_data->ece.vendor_id;
  1900. event->ece.attr_mod = req_data->ece.attr_mod;
  1901. }
  1902. static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
  1903. const struct ib_cm_event *ib_event)
  1904. {
  1905. return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
  1906. (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
  1907. ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
  1908. (id->qp_type == IB_QPT_UD)) ||
  1909. (!id->qp_type));
  1910. }
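/*
 * IB CM callback for connection and SIDR requests arriving on a listening
 * id: locate the listener, create a child id for the new connection,
 * acquire its device and deliver RDMA_CM_EVENT_CONNECT_REQUEST to the
 * user.
 */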
  1911. static int cma_ib_req_handler(struct ib_cm_id *cm_id,
  1912. const struct ib_cm_event *ib_event)
  1913. {
  1914. struct rdma_id_private *listen_id, *conn_id = NULL;
  1915. struct rdma_cm_event event = {};
  1916. struct cma_req_info req = {};
  1917. struct net_device *net_dev;
  1918. u8 offset;
  1919. int ret;
  1920. listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
  1921. if (IS_ERR(listen_id))
  1922. return PTR_ERR(listen_id);
  1923. trace_cm_req_handler(listen_id, ib_event->event);
  1924. if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
  1925. ret = -EINVAL;
  1926. goto net_dev_put;
  1927. }
  1928. mutex_lock(&listen_id->handler_mutex);
  1929. if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
  1930. ret = -ECONNABORTED;
  1931. goto err_unlock;
  1932. }
  1933. offset = cma_user_data_offset(listen_id);
  1934. event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
  1935. if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
  1936. conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
  1937. event.param.ud.private_data = ib_event->private_data + offset;
  1938. event.param.ud.private_data_len =
  1939. IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
  1940. } else {
  1941. conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
  1942. cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
  1943. ib_event->private_data, offset);
  1944. }
  1945. if (!conn_id) {
  1946. ret = -ENOMEM;
  1947. goto err_unlock;
  1948. }
  1949. mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
  1950. ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
  1951. if (ret) {
  1952. destroy_id_handler_unlock(conn_id);
  1953. goto err_unlock;
  1954. }
  1955. conn_id->cm_id.ib = cm_id;
  1956. cm_id->context = conn_id;
  1957. cm_id->cm_handler = cma_ib_handler;
  1958. ret = cma_cm_event_handler(conn_id, &event);
  1959. if (ret) {
  1960. /* Destroy the CM ID by returning a non-zero value. */
  1961. conn_id->cm_id.ib = NULL;
  1962. mutex_unlock(&listen_id->handler_mutex);
  1963. destroy_id_handler_unlock(conn_id);
  1964. goto net_dev_put;
  1965. }
  1966. if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
  1967. conn_id->id.qp_type != IB_QPT_UD) {
  1968. trace_cm_send_mra(cm_id->context);
  1969. ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
  1970. }
  1971. mutex_unlock(&conn_id->handler_mutex);
  1972. err_unlock:
  1973. mutex_unlock(&listen_id->handler_mutex);
  1974. net_dev_put:
  1975. if (net_dev)
  1976. dev_put(net_dev);
  1977. return ret;
  1978. }
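/*
 * For AF_IB the service ID is taken directly from the address; for IP
 * based port spaces it encodes the port space in bits 16-31 and the port
 * number in bits 0-15, matching rdma_ps_from_service_id() and
 * cma_port_from_service_id() above.
 */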
  1979. __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
  1980. {
  1981. if (addr->sa_family == AF_IB)
  1982. return ((struct sockaddr_ib *) addr)->sib_sid;
  1983. return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
  1984. }
  1985. EXPORT_SYMBOL(rdma_get_service_id);
  1986. void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
  1987. union ib_gid *dgid)
  1988. {
  1989. struct rdma_addr *addr = &cm_id->route.addr;
  1990. if (!cm_id->device) {
  1991. if (sgid)
  1992. memset(sgid, 0, sizeof(*sgid));
  1993. if (dgid)
  1994. memset(dgid, 0, sizeof(*dgid));
  1995. return;
  1996. }
  1997. if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
  1998. if (sgid)
  1999. rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
  2000. if (dgid)
  2001. rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
  2002. } else {
  2003. if (sgid)
  2004. rdma_addr_get_sgid(&addr->dev_addr, sgid);
  2005. if (dgid)
  2006. rdma_addr_get_dgid(&addr->dev_addr, dgid);
  2007. }
  2008. }
  2009. EXPORT_SYMBOL(rdma_read_gids);
  2010. static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
  2011. {
  2012. struct rdma_id_private *id_priv = iw_id->context;
  2013. struct rdma_cm_event event = {};
  2014. int ret = 0;
  2015. struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
  2016. struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  2017. mutex_lock(&id_priv->handler_mutex);
  2018. if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
  2019. goto out;
  2020. switch (iw_event->event) {
  2021. case IW_CM_EVENT_CLOSE:
  2022. event.event = RDMA_CM_EVENT_DISCONNECTED;
  2023. break;
  2024. case IW_CM_EVENT_CONNECT_REPLY:
  2025. memcpy(cma_src_addr(id_priv), laddr,
  2026. rdma_addr_size(laddr));
  2027. memcpy(cma_dst_addr(id_priv), raddr,
  2028. rdma_addr_size(raddr));
  2029. switch (iw_event->status) {
  2030. case 0:
  2031. event.event = RDMA_CM_EVENT_ESTABLISHED;
  2032. event.param.conn.initiator_depth = iw_event->ird;
  2033. event.param.conn.responder_resources = iw_event->ord;
  2034. break;
  2035. case -ECONNRESET:
  2036. case -ECONNREFUSED:
  2037. event.event = RDMA_CM_EVENT_REJECTED;
  2038. break;
  2039. case -ETIMEDOUT:
  2040. event.event = RDMA_CM_EVENT_UNREACHABLE;
  2041. break;
  2042. default:
  2043. event.event = RDMA_CM_EVENT_CONNECT_ERROR;
  2044. break;
  2045. }
  2046. break;
  2047. case IW_CM_EVENT_ESTABLISHED:
  2048. event.event = RDMA_CM_EVENT_ESTABLISHED;
  2049. event.param.conn.initiator_depth = iw_event->ird;
  2050. event.param.conn.responder_resources = iw_event->ord;
  2051. break;
  2052. default:
  2053. goto out;
  2054. }
  2055. event.status = iw_event->status;
  2056. event.param.conn.private_data = iw_event->private_data;
  2057. event.param.conn.private_data_len = iw_event->private_data_len;
  2058. ret = cma_cm_event_handler(id_priv, &event);
  2059. if (ret) {
  2060. /* Destroy the CM ID by returning a non-zero value. */
  2061. id_priv->cm_id.iw = NULL;
  2062. destroy_id_handler_unlock(id_priv);
  2063. return ret;
  2064. }
  2065. out:
  2066. mutex_unlock(&id_priv->handler_mutex);
  2067. return ret;
  2068. }
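/*
 * iWARP connection request handler: create a child id for the incoming
 * connection, resolve its device from the local address and deliver
 * RDMA_CM_EVENT_CONNECT_REQUEST to the listener's event handler.
 */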
  2069. static int iw_conn_req_handler(struct iw_cm_id *cm_id,
  2070. struct iw_cm_event *iw_event)
  2071. {
  2072. struct rdma_id_private *listen_id, *conn_id;
  2073. struct rdma_cm_event event = {};
  2074. int ret = -ECONNABORTED;
  2075. struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
  2076. struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  2077. event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
  2078. event.param.conn.private_data = iw_event->private_data;
  2079. event.param.conn.private_data_len = iw_event->private_data_len;
  2080. event.param.conn.initiator_depth = iw_event->ird;
  2081. event.param.conn.responder_resources = iw_event->ord;
  2082. listen_id = cm_id->context;
  2083. mutex_lock(&listen_id->handler_mutex);
  2084. if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
  2085. goto out;
  2086. /* Create a new RDMA id for the new IW CM ID */
  2087. conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
  2088. listen_id->id.event_handler,
  2089. listen_id->id.context, RDMA_PS_TCP,
  2090. IB_QPT_RC, listen_id);
  2091. if (IS_ERR(conn_id)) {
  2092. ret = -ENOMEM;
  2093. goto out;
  2094. }
  2095. mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
  2096. conn_id->state = RDMA_CM_CONNECT;
  2097. ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
  2098. if (ret) {
  2099. mutex_unlock(&listen_id->handler_mutex);
  2100. destroy_id_handler_unlock(conn_id);
  2101. return ret;
  2102. }
  2103. ret = cma_iw_acquire_dev(conn_id, listen_id);
  2104. if (ret) {
  2105. mutex_unlock(&listen_id->handler_mutex);
  2106. destroy_id_handler_unlock(conn_id);
  2107. return ret;
  2108. }
  2109. conn_id->cm_id.iw = cm_id;
  2110. cm_id->context = conn_id;
  2111. cm_id->cm_handler = cma_iw_handler;
  2112. memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
  2113. memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
  2114. ret = cma_cm_event_handler(conn_id, &event);
  2115. if (ret) {
  2116. /* User wants to destroy the CM ID */
  2117. conn_id->cm_id.iw = NULL;
  2118. mutex_unlock(&listen_id->handler_mutex);
  2119. destroy_id_handler_unlock(conn_id);
  2120. return ret;
  2121. }
  2122. mutex_unlock(&conn_id->handler_mutex);
  2123. out:
  2124. mutex_unlock(&listen_id->handler_mutex);
  2125. return ret;
  2126. }
  2127. static int cma_ib_listen(struct rdma_id_private *id_priv)
  2128. {
  2129. struct sockaddr *addr;
  2130. struct ib_cm_id *id;
  2131. __be64 svc_id;
  2132. addr = cma_src_addr(id_priv);
  2133. svc_id = rdma_get_service_id(&id_priv->id, addr);
  2134. id = ib_cm_insert_listen(id_priv->id.device,
  2135. cma_ib_req_handler, svc_id);
  2136. if (IS_ERR(id))
  2137. return PTR_ERR(id);
  2138. id_priv->cm_id.ib = id;
  2139. return 0;
  2140. }
  2141. static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
  2142. {
  2143. int ret;
  2144. struct iw_cm_id *id;
  2145. id = iw_create_cm_id(id_priv->id.device,
  2146. iw_conn_req_handler,
  2147. id_priv);
  2148. if (IS_ERR(id))
  2149. return PTR_ERR(id);
  2150. mutex_lock(&id_priv->qp_mutex);
  2151. id->tos = id_priv->tos;
  2152. id->tos_set = id_priv->tos_set;
  2153. mutex_unlock(&id_priv->qp_mutex);
  2154. id_priv->cm_id.iw = id;
  2155. memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
  2156. rdma_addr_size(cma_src_addr(id_priv)));
  2157. ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
  2158. if (ret) {
  2159. iw_destroy_cm_id(id_priv->cm_id.iw);
  2160. id_priv->cm_id.iw = NULL;
  2161. }
  2162. return ret;
  2163. }
  2164. static int cma_listen_handler(struct rdma_cm_id *id,
  2165. struct rdma_cm_event *event)
  2166. {
  2167. struct rdma_id_private *id_priv = id->context;
  2168. /* Listening IDs are always destroyed on removal */
  2169. if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
  2170. return -1;
  2171. id->context = id_priv->id.context;
  2172. id->event_handler = id_priv->id.event_handler;
  2173. trace_cm_event_handler(id_priv, event);
  2174. return id_priv->id.event_handler(id, event);
  2175. }
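/*
 * Mirror a wildcard listen onto a single device by creating an internal
 * child id bound to that device. On failure the caller must destroy the
 * id returned through @to_destroy after dropping the lock.
 */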
  2176. static int cma_listen_on_dev(struct rdma_id_private *id_priv,
  2177. struct cma_device *cma_dev,
  2178. struct rdma_id_private **to_destroy)
  2179. {
  2180. struct rdma_id_private *dev_id_priv;
  2181. struct net *net = id_priv->id.route.addr.dev_addr.net;
  2182. int ret;
  2183. lockdep_assert_held(&lock);
  2184. *to_destroy = NULL;
  2185. if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
  2186. return 0;
  2187. dev_id_priv =
  2188. __rdma_create_id(net, cma_listen_handler, id_priv,
  2189. id_priv->id.ps, id_priv->id.qp_type, id_priv);
  2190. if (IS_ERR(dev_id_priv))
  2191. return PTR_ERR(dev_id_priv);
  2192. dev_id_priv->state = RDMA_CM_ADDR_BOUND;
  2193. memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
  2194. rdma_addr_size(cma_src_addr(id_priv)));
  2195. _cma_attach_to_dev(dev_id_priv, cma_dev);
  2196. rdma_restrack_add(&dev_id_priv->res);
  2197. cma_id_get(id_priv);
  2198. dev_id_priv->internal_id = 1;
  2199. dev_id_priv->afonly = id_priv->afonly;
  2200. mutex_lock(&id_priv->qp_mutex);
  2201. dev_id_priv->tos_set = id_priv->tos_set;
  2202. dev_id_priv->tos = id_priv->tos;
  2203. mutex_unlock(&id_priv->qp_mutex);
  2204. ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
  2205. if (ret)
  2206. goto err_listen;
  2207. list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
  2208. return 0;
  2209. err_listen:
  2210. /* Caller must destroy this after releasing lock */
  2211. *to_destroy = dev_id_priv;
  2212. dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
  2213. return ret;
  2214. }
  2215. static int cma_listen_on_all(struct rdma_id_private *id_priv)
  2216. {
  2217. struct rdma_id_private *to_destroy;
  2218. struct cma_device *cma_dev;
  2219. int ret;
  2220. mutex_lock(&lock);
  2221. list_add_tail(&id_priv->list, &listen_any_list);
  2222. list_for_each_entry(cma_dev, &dev_list, list) {
  2223. ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
  2224. if (ret) {
  2225. /* Prevent racing with cma_process_remove() */
  2226. if (to_destroy)
  2227. list_del_init(&to_destroy->list);
  2228. goto err_listen;
  2229. }
  2230. }
  2231. mutex_unlock(&lock);
  2232. return 0;
  2233. err_listen:
  2234. _cma_cancel_listens(id_priv);
  2235. mutex_unlock(&lock);
  2236. if (to_destroy)
  2237. rdma_destroy_id(&to_destroy->id);
  2238. return ret;
  2239. }
  2240. void rdma_set_service_type(struct rdma_cm_id *id, int tos)
  2241. {
  2242. struct rdma_id_private *id_priv;
  2243. id_priv = container_of(id, struct rdma_id_private, id);
  2244. mutex_lock(&id_priv->qp_mutex);
  2245. id_priv->tos = (u8) tos;
  2246. id_priv->tos_set = true;
  2247. mutex_unlock(&id_priv->qp_mutex);
  2248. }
  2249. EXPORT_SYMBOL(rdma_set_service_type);
  2250. /**
2251. * rdma_set_ack_timeout() - Set the ack timeout of the QP associated
2252. * with a connection identifier.
2253. * @id: Communication identifier to associate with the ack timeout.
2254. * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
  2255. *
2256. * This function should be called before rdma_connect() on the active side,
2257. * and before rdma_accept() on the passive side. It applies to the primary
2258. * path only. The timeout affects the local side of the QP only; it is not
2259. * negotiated with the remote side, and zero disables the timer. If it is
2260. * set before rdma_resolve_route(), the value is also used to determine the
2261. * PacketLifeTime for RoCE.
  2262. *
  2263. * Return: 0 for success
  2264. */
  2265. int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
  2266. {
  2267. struct rdma_id_private *id_priv;
  2268. if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
  2269. return -EINVAL;
  2270. id_priv = container_of(id, struct rdma_id_private, id);
  2271. mutex_lock(&id_priv->qp_mutex);
  2272. id_priv->timeout = timeout;
  2273. id_priv->timeout_set = true;
  2274. mutex_unlock(&id_priv->qp_mutex);
  2275. return 0;
  2276. }
  2277. EXPORT_SYMBOL(rdma_set_ack_timeout);
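/*
 * Illustrative usage sketch (cm_id is a hypothetical rdma_cm_id pointer):
 * a timeout value of 14 corresponds to roughly 4.096 usec * 2^14 ~= 67 ms
 * and would be set on the active side before calling rdma_connect():
 *
 *	if (rdma_set_ack_timeout(cm_id, 14))
 *		pr_warn("failed to set RDMA CM ack timeout\n");
 */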
  2278. static void cma_query_handler(int status, struct sa_path_rec *path_rec,
  2279. void *context)
  2280. {
  2281. struct cma_work *work = context;
  2282. struct rdma_route *route;
  2283. route = &work->id->id.route;
  2284. if (!status) {
  2285. route->num_paths = 1;
  2286. *route->path_rec = *path_rec;
  2287. } else {
  2288. work->old_state = RDMA_CM_ROUTE_QUERY;
  2289. work->new_state = RDMA_CM_ADDR_RESOLVED;
  2290. work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
  2291. work->event.status = status;
  2292. pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
  2293. status);
  2294. }
  2295. queue_work(cma_wq, &work->work);
  2296. }
  2297. static int cma_query_ib_route(struct rdma_id_private *id_priv,
  2298. unsigned long timeout_ms, struct cma_work *work)
  2299. {
  2300. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  2301. struct sa_path_rec path_rec;
  2302. ib_sa_comp_mask comp_mask;
  2303. struct sockaddr_in6 *sin6;
  2304. struct sockaddr_ib *sib;
  2305. memset(&path_rec, 0, sizeof path_rec);
  2306. if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
  2307. path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
  2308. else
  2309. path_rec.rec_type = SA_PATH_REC_TYPE_IB;
  2310. rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
  2311. rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
  2312. path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
  2313. path_rec.numb_path = 1;
  2314. path_rec.reversible = 1;
  2315. path_rec.service_id = rdma_get_service_id(&id_priv->id,
  2316. cma_dst_addr(id_priv));
  2317. comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
  2318. IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
  2319. IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
  2320. switch (cma_family(id_priv)) {
  2321. case AF_INET:
  2322. path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
  2323. comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
  2324. break;
  2325. case AF_INET6:
  2326. sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
  2327. path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
  2328. comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
  2329. break;
  2330. case AF_IB:
  2331. sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
  2332. path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
  2333. comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
  2334. break;
  2335. }
  2336. id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
  2337. id_priv->id.port_num, &path_rec,
  2338. comp_mask, timeout_ms,
  2339. GFP_KERNEL, cma_query_handler,
  2340. work, &id_priv->query);
  2341. return (id_priv->query_id < 0) ? id_priv->query_id : 0;
  2342. }
  2343. static void cma_iboe_join_work_handler(struct work_struct *work)
  2344. {
  2345. struct cma_multicast *mc =
  2346. container_of(work, struct cma_multicast, iboe_join.work);
  2347. struct rdma_cm_event *event = &mc->iboe_join.event;
  2348. struct rdma_id_private *id_priv = mc->id_priv;
  2349. int ret;
  2350. mutex_lock(&id_priv->handler_mutex);
  2351. if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
  2352. READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
  2353. goto out_unlock;
  2354. ret = cma_cm_event_handler(id_priv, event);
  2355. WARN_ON(ret);
  2356. out_unlock:
  2357. mutex_unlock(&id_priv->handler_mutex);
  2358. if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
  2359. rdma_destroy_ah_attr(&event->param.ud.ah_attr);
  2360. }
  2361. static void cma_work_handler(struct work_struct *_work)
  2362. {
  2363. struct cma_work *work = container_of(_work, struct cma_work, work);
  2364. struct rdma_id_private *id_priv = work->id;
  2365. mutex_lock(&id_priv->handler_mutex);
  2366. if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
  2367. READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
  2368. goto out_unlock;
  2369. if (work->old_state != 0 || work->new_state != 0) {
  2370. if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
  2371. goto out_unlock;
  2372. }
  2373. if (cma_cm_event_handler(id_priv, &work->event)) {
  2374. cma_id_put(id_priv);
  2375. destroy_id_handler_unlock(id_priv);
  2376. goto out_free;
  2377. }
  2378. out_unlock:
  2379. mutex_unlock(&id_priv->handler_mutex);
  2380. cma_id_put(id_priv);
  2381. out_free:
  2382. if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
  2383. rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
  2384. kfree(work);
  2385. }
  2386. static void cma_init_resolve_route_work(struct cma_work *work,
  2387. struct rdma_id_private *id_priv)
  2388. {
  2389. work->id = id_priv;
  2390. INIT_WORK(&work->work, cma_work_handler);
  2391. work->old_state = RDMA_CM_ROUTE_QUERY;
  2392. work->new_state = RDMA_CM_ROUTE_RESOLVED;
  2393. work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
  2394. }
  2395. static void enqueue_resolve_addr_work(struct cma_work *work,
  2396. struct rdma_id_private *id_priv)
  2397. {
  2398. /* Balances with cma_id_put() in cma_work_handler */
  2399. cma_id_get(id_priv);
  2400. work->id = id_priv;
  2401. INIT_WORK(&work->work, cma_work_handler);
  2402. work->old_state = RDMA_CM_ADDR_QUERY;
  2403. work->new_state = RDMA_CM_ADDR_RESOLVED;
  2404. work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  2405. queue_work(cma_wq, &work->work);
  2406. }
  2407. static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
  2408. unsigned long timeout_ms)
  2409. {
  2410. struct rdma_route *route = &id_priv->id.route;
  2411. struct cma_work *work;
  2412. int ret;
  2413. work = kzalloc(sizeof *work, GFP_KERNEL);
  2414. if (!work)
  2415. return -ENOMEM;
  2416. cma_init_resolve_route_work(work, id_priv);
  2417. if (!route->path_rec)
  2418. route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
  2419. if (!route->path_rec) {
  2420. ret = -ENOMEM;
  2421. goto err1;
  2422. }
  2423. ret = cma_query_ib_route(id_priv, timeout_ms, work);
  2424. if (ret)
  2425. goto err2;
  2426. return 0;
  2427. err2:
  2428. kfree(route->path_rec);
  2429. route->path_rec = NULL;
  2430. err1:
  2431. kfree(work);
  2432. return ret;
  2433. }
  2434. static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
  2435. unsigned long supported_gids,
  2436. enum ib_gid_type default_gid)
  2437. {
  2438. if ((network_type == RDMA_NETWORK_IPV4 ||
  2439. network_type == RDMA_NETWORK_IPV6) &&
  2440. test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
  2441. return IB_GID_TYPE_ROCE_UDP_ENCAP;
  2442. return default_gid;
  2443. }
  2444. /*
2445. * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
2446. * path record type based on the GID type.
2447. * It also sets up the other L2 fields of the path record, including the
2448. * destination MAC address and the netdev ifindex.
2449. * It returns the netdev of the bound interface for this path record entry.
  2450. */
  2451. static struct net_device *
  2452. cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
  2453. {
  2454. struct rdma_route *route = &id_priv->id.route;
  2455. enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
  2456. struct rdma_addr *addr = &route->addr;
  2457. unsigned long supported_gids;
  2458. struct net_device *ndev;
  2459. if (!addr->dev_addr.bound_dev_if)
  2460. return NULL;
  2461. ndev = dev_get_by_index(addr->dev_addr.net,
  2462. addr->dev_addr.bound_dev_if);
  2463. if (!ndev)
  2464. return NULL;
  2465. supported_gids = roce_gid_type_mask_support(id_priv->id.device,
  2466. id_priv->id.port_num);
  2467. gid_type = cma_route_gid_type(addr->dev_addr.network,
  2468. supported_gids,
  2469. id_priv->gid_type);
2470. /* Use the hint from the IP stack to select the GID type */
  2471. if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
  2472. gid_type = ib_network_to_gid_type(addr->dev_addr.network);
  2473. route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
  2474. route->path_rec->roce.route_resolved = true;
  2475. sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
  2476. return ndev;
  2477. }
  2478. int rdma_set_ib_path(struct rdma_cm_id *id,
  2479. struct sa_path_rec *path_rec)
  2480. {
  2481. struct rdma_id_private *id_priv;
  2482. struct net_device *ndev;
  2483. int ret;
  2484. id_priv = container_of(id, struct rdma_id_private, id);
  2485. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
  2486. RDMA_CM_ROUTE_RESOLVED))
  2487. return -EINVAL;
  2488. id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
  2489. GFP_KERNEL);
  2490. if (!id->route.path_rec) {
  2491. ret = -ENOMEM;
  2492. goto err;
  2493. }
  2494. if (rdma_protocol_roce(id->device, id->port_num)) {
  2495. ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
  2496. if (!ndev) {
  2497. ret = -ENODEV;
  2498. goto err_free;
  2499. }
  2500. dev_put(ndev);
  2501. }
  2502. id->route.num_paths = 1;
  2503. return 0;
  2504. err_free:
  2505. kfree(id->route.path_rec);
  2506. id->route.path_rec = NULL;
  2507. err:
  2508. cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
  2509. return ret;
  2510. }
  2511. EXPORT_SYMBOL(rdma_set_ib_path);
  2512. static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
  2513. {
  2514. struct cma_work *work;
  2515. work = kzalloc(sizeof *work, GFP_KERNEL);
  2516. if (!work)
  2517. return -ENOMEM;
  2518. cma_init_resolve_route_work(work, id_priv);
  2519. queue_work(cma_wq, &work->work);
  2520. return 0;
  2521. }
  2522. static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
  2523. {
  2524. struct net_device *dev;
  2525. dev = vlan_dev_real_dev(vlan_ndev);
  2526. if (dev->num_tc)
  2527. return netdev_get_prio_tc_map(dev, prio);
  2528. return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
  2529. VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
  2530. }
  2531. struct iboe_prio_tc_map {
  2532. int input_prio;
  2533. int output_tc;
  2534. bool found;
  2535. };
  2536. static int get_lower_vlan_dev_tc(struct net_device *dev,
  2537. struct netdev_nested_priv *priv)
  2538. {
  2539. struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
  2540. if (is_vlan_dev(dev))
  2541. map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
  2542. else if (dev->num_tc)
  2543. map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
  2544. else
  2545. map->output_tc = 0;
2546. /* We are interested only in the first-level VLAN device, so always
2547. * return 1 to stop iterating over the next-level devices.
  2548. */
  2549. map->found = true;
  2550. return 1;
  2551. }
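/*
 * Map an IP ToS value to an egress traffic class for RoCE: use the VLAN
 * egress QoS map when a VLAN device is involved, otherwise the
 * netdevice's prio-to-tc map, falling back to 0.
 */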
  2552. static int iboe_tos_to_sl(struct net_device *ndev, int tos)
  2553. {
  2554. struct iboe_prio_tc_map prio_tc_map = {};
  2555. int prio = rt_tos2priority(tos);
  2556. struct netdev_nested_priv priv;
  2557. /* If VLAN device, get it directly from the VLAN netdev */
  2558. if (is_vlan_dev(ndev))
  2559. return get_vlan_ndev_tc(ndev, prio);
  2560. prio_tc_map.input_prio = prio;
  2561. priv.data = (void *)&prio_tc_map;
  2562. rcu_read_lock();
  2563. netdev_walk_all_lower_dev_rcu(ndev,
  2564. get_lower_vlan_dev_tc,
  2565. &priv);
  2566. rcu_read_unlock();
2567. /* If a map is found from a lower device, use it; otherwise
2568. * continue with the current netdevice to get the priority-to-TC map.
  2569. */
  2570. if (prio_tc_map.found)
  2571. return prio_tc_map.output_tc;
  2572. else if (ndev->num_tc)
  2573. return netdev_get_prio_tc_map(ndev, prio);
  2574. else
  2575. return 0;
  2576. }
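/*
 * Derive a UDP flow label for RoCE v2. If the ULP supplied an IPv6 flow
 * label it is reused; otherwise one is synthesized from the ports, e.g.
 * (illustrative only) sport 4660, dport 4791 -> (4660 * 31 + 4791) masked
 * with IB_GRH_FLOWLABEL_MASK, which keeps the 20-bit GRH flow label field.
 */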
  2577. static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
  2578. {
  2579. struct sockaddr_in6 *addr6;
  2580. u16 dport, sport;
  2581. u32 hash, fl;
  2582. addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
  2583. fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
  2584. if ((cma_family(id_priv) != AF_INET6) || !fl) {
  2585. dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
  2586. sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
  2587. hash = (u32)sport * 31 + dport;
  2588. fl = hash & IB_GRH_FLOWLABEL_MASK;
  2589. }
  2590. return cpu_to_be32(fl);
  2591. }
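/*
 * Resolve the route for a RoCE (IBoE) port: build a path record from the
 * bound net_device (MAC, MTU, rate, hop limit), derive the SL from the TOS
 * and, for RoCE v2, a UDP flow label, then queue work to report the result
 * to the ULP.
 */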
  2592. static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
  2593. {
  2594. struct rdma_route *route = &id_priv->id.route;
  2595. struct rdma_addr *addr = &route->addr;
  2596. struct cma_work *work;
  2597. int ret;
  2598. struct net_device *ndev;
  2599. u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
  2600. rdma_start_port(id_priv->cma_dev->device)];
  2601. u8 tos;
  2602. mutex_lock(&id_priv->qp_mutex);
  2603. tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
  2604. mutex_unlock(&id_priv->qp_mutex);
  2605. work = kzalloc(sizeof *work, GFP_KERNEL);
  2606. if (!work)
  2607. return -ENOMEM;
  2608. route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
  2609. if (!route->path_rec) {
  2610. ret = -ENOMEM;
  2611. goto err1;
  2612. }
  2613. route->num_paths = 1;
  2614. ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
  2615. if (!ndev) {
  2616. ret = -ENODEV;
  2617. goto err2;
  2618. }
  2619. rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
  2620. &route->path_rec->sgid);
  2621. rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
  2622. &route->path_rec->dgid);
  2623. if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
  2624. /* TODO: get the hoplimit from the inet/inet6 device */
  2625. route->path_rec->hop_limit = addr->dev_addr.hoplimit;
  2626. else
  2627. route->path_rec->hop_limit = 1;
  2628. route->path_rec->reversible = 1;
  2629. route->path_rec->pkey = cpu_to_be16(0xffff);
  2630. route->path_rec->mtu_selector = IB_SA_EQ;
  2631. route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
  2632. route->path_rec->traffic_class = tos;
  2633. route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
  2634. route->path_rec->rate_selector = IB_SA_EQ;
  2635. route->path_rec->rate = iboe_get_rate(ndev);
  2636. dev_put(ndev);
  2637. route->path_rec->packet_life_time_selector = IB_SA_EQ;
  2638. /* In case ACK timeout is set, use this value to calculate
  2639. * PacketLifeTime. As per IBTA 12.7.34,
  2640. * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
  2641. * Assuming a negligible local ACK delay, we can use
  2642. * PacketLifeTime = local ACK timeout/2
  2643. * as a reasonable approximation for RoCE networks.
  2644. */
  2645. mutex_lock(&id_priv->qp_mutex);
  2646. if (id_priv->timeout_set && id_priv->timeout)
  2647. route->path_rec->packet_life_time = id_priv->timeout - 1;
  2648. else
  2649. route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
  2650. mutex_unlock(&id_priv->qp_mutex);
  2651. if (!route->path_rec->mtu) {
  2652. ret = -EINVAL;
  2653. goto err2;
  2654. }
  2655. if (rdma_protocol_roce_udp_encap(id_priv->id.device,
  2656. id_priv->id.port_num))
  2657. route->path_rec->flow_label =
  2658. cma_get_roce_udp_flow_label(id_priv);
  2659. cma_init_resolve_route_work(work, id_priv);
  2660. queue_work(cma_wq, &work->work);
  2661. return 0;
  2662. err2:
  2663. kfree(route->path_rec);
  2664. route->path_rec = NULL;
  2665. route->num_paths = 0;
  2666. err1:
  2667. kfree(work);
  2668. return ret;
  2669. }
  2670. int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
  2671. {
  2672. struct rdma_id_private *id_priv;
  2673. int ret;
  2674. id_priv = container_of(id, struct rdma_id_private, id);
  2675. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
  2676. return -EINVAL;
  2677. cma_id_get(id_priv);
  2678. if (rdma_cap_ib_sa(id->device, id->port_num))
  2679. ret = cma_resolve_ib_route(id_priv, timeout_ms);
  2680. else if (rdma_protocol_roce(id->device, id->port_num))
  2681. ret = cma_resolve_iboe_route(id_priv);
  2682. else if (rdma_protocol_iwarp(id->device, id->port_num))
  2683. ret = cma_resolve_iw_route(id_priv);
  2684. else
  2685. ret = -ENOSYS;
  2686. if (ret)
  2687. goto err;
  2688. return 0;
  2689. err:
  2690. cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
  2691. cma_id_put(id_priv);
  2692. return ret;
  2693. }
  2694. EXPORT_SYMBOL(rdma_resolve_route);
  2695. static void cma_set_loopback(struct sockaddr *addr)
  2696. {
  2697. switch (addr->sa_family) {
  2698. case AF_INET:
  2699. ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  2700. break;
  2701. case AF_INET6:
  2702. ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
  2703. 0, 0, 0, htonl(1));
  2704. break;
  2705. default:
  2706. ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
  2707. 0, 0, 0, htonl(1));
  2708. break;
  2709. }
  2710. }
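/*
 * Bind to a loopback/local address: pick a cma device (preferring one with
 * an active port), take its first GID and P_Key, attach the id to it, and
 * set the source address to the loopback address.
 */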
  2711. static int cma_bind_loopback(struct rdma_id_private *id_priv)
  2712. {
  2713. struct cma_device *cma_dev, *cur_dev;
  2714. union ib_gid gid;
  2715. enum ib_port_state port_state;
  2716. unsigned int p;
  2717. u16 pkey;
  2718. int ret;
  2719. cma_dev = NULL;
  2720. mutex_lock(&lock);
  2721. list_for_each_entry(cur_dev, &dev_list, list) {
  2722. if (cma_family(id_priv) == AF_IB &&
  2723. !rdma_cap_ib_cm(cur_dev->device, 1))
  2724. continue;
  2725. if (!cma_dev)
  2726. cma_dev = cur_dev;
  2727. rdma_for_each_port (cur_dev->device, p) {
  2728. if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
  2729. port_state == IB_PORT_ACTIVE) {
  2730. cma_dev = cur_dev;
  2731. goto port_found;
  2732. }
  2733. }
  2734. }
  2735. if (!cma_dev) {
  2736. ret = -ENODEV;
  2737. goto out;
  2738. }
  2739. p = 1;
  2740. port_found:
  2741. ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
  2742. if (ret)
  2743. goto out;
  2744. ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
  2745. if (ret)
  2746. goto out;
  2747. id_priv->id.route.addr.dev_addr.dev_type =
  2748. (rdma_protocol_ib(cma_dev->device, p)) ?
  2749. ARPHRD_INFINIBAND : ARPHRD_ETHER;
  2750. rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
  2751. ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
  2752. id_priv->id.port_num = p;
  2753. cma_attach_to_dev(id_priv, cma_dev);
  2754. rdma_restrack_add(&id_priv->res);
  2755. cma_set_loopback(cma_src_addr(id_priv));
  2756. out:
  2757. mutex_unlock(&lock);
  2758. return ret;
  2759. }
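/*
 * Completion callback for rdma_resolve_ip(). On success, acquire a matching
 * rdma device for the resolved source address and report
 * RDMA_CM_EVENT_ADDR_RESOLVED; on failure, restore the previous source
 * address and report RDMA_CM_EVENT_ADDR_ERROR.
 */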
  2760. static void addr_handler(int status, struct sockaddr *src_addr,
  2761. struct rdma_dev_addr *dev_addr, void *context)
  2762. {
  2763. struct rdma_id_private *id_priv = context;
  2764. struct rdma_cm_event event = {};
  2765. struct sockaddr *addr;
  2766. struct sockaddr_storage old_addr;
  2767. mutex_lock(&id_priv->handler_mutex);
  2768. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
  2769. RDMA_CM_ADDR_RESOLVED))
  2770. goto out;
  2771. /*
  2772. * Store the previous src address, so that if we fail to acquire
  2773. * matching rdma device, old address can be restored back, which helps
  2774. * to cancel the cma listen operation correctly.
  2775. */
  2776. addr = cma_src_addr(id_priv);
  2777. memcpy(&old_addr, addr, rdma_addr_size(addr));
  2778. memcpy(addr, src_addr, rdma_addr_size(src_addr));
  2779. if (!status && !id_priv->cma_dev) {
  2780. status = cma_acquire_dev_by_src_ip(id_priv);
  2781. if (status)
  2782. pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
  2783. status);
  2784. rdma_restrack_add(&id_priv->res);
  2785. } else if (status) {
  2786. pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
  2787. }
  2788. if (status) {
  2789. memcpy(addr, &old_addr,
  2790. rdma_addr_size((struct sockaddr *)&old_addr));
  2791. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
  2792. RDMA_CM_ADDR_BOUND))
  2793. goto out;
  2794. event.event = RDMA_CM_EVENT_ADDR_ERROR;
  2795. event.status = status;
  2796. } else
  2797. event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
  2798. if (cma_cm_event_handler(id_priv, &event)) {
  2799. destroy_id_handler_unlock(id_priv);
  2800. return;
  2801. }
  2802. out:
  2803. mutex_unlock(&id_priv->handler_mutex);
  2804. }
  2805. static int cma_resolve_loopback(struct rdma_id_private *id_priv)
  2806. {
  2807. struct cma_work *work;
  2808. union ib_gid gid;
  2809. int ret;
  2810. work = kzalloc(sizeof *work, GFP_KERNEL);
  2811. if (!work)
  2812. return -ENOMEM;
  2813. if (!id_priv->cma_dev) {
  2814. ret = cma_bind_loopback(id_priv);
  2815. if (ret)
  2816. goto err;
  2817. }
  2818. rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
  2819. rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
  2820. enqueue_resolve_addr_work(work, id_priv);
  2821. return 0;
  2822. err:
  2823. kfree(work);
  2824. return ret;
  2825. }
  2826. static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
  2827. {
  2828. struct cma_work *work;
  2829. int ret;
  2830. work = kzalloc(sizeof *work, GFP_KERNEL);
  2831. if (!work)
  2832. return -ENOMEM;
  2833. if (!id_priv->cma_dev) {
  2834. ret = cma_resolve_ib_dev(id_priv);
  2835. if (ret)
  2836. goto err;
  2837. }
  2838. rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
  2839. &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
  2840. enqueue_resolve_addr_work(work, id_priv);
  2841. return 0;
  2842. err:
  2843. kfree(work);
  2844. return ret;
  2845. }
  2846. static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
  2847. const struct sockaddr *dst_addr)
  2848. {
  2849. struct sockaddr_storage zero_sock = {};
  2850. if (src_addr && src_addr->sa_family)
  2851. return rdma_bind_addr(id, src_addr);
  2852. /*
2853. * When src_addr is not specified, automatically supply a wildcard (any) address
  2854. */
  2855. zero_sock.ss_family = dst_addr->sa_family;
  2856. if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
  2857. struct sockaddr_in6 *src_addr6 =
  2858. (struct sockaddr_in6 *)&zero_sock;
  2859. struct sockaddr_in6 *dst_addr6 =
  2860. (struct sockaddr_in6 *)dst_addr;
  2861. src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
  2862. if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
  2863. id->route.addr.dev_addr.bound_dev_if =
  2864. dst_addr6->sin6_scope_id;
  2865. } else if (dst_addr->sa_family == AF_IB) {
  2866. ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
  2867. ((struct sockaddr_ib *)dst_addr)->sib_pkey;
  2868. }
  2869. return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
  2870. }
  2871. /*
  2872. * If required, resolve the source address for bind and leave the id_priv in
2873. * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
2874. * calls made by the ULP; a previously bound ID will not be re-bound and
2875. * src_addr is ignored.
  2876. */
  2877. static int resolve_prepare_src(struct rdma_id_private *id_priv,
  2878. struct sockaddr *src_addr,
  2879. const struct sockaddr *dst_addr)
  2880. {
  2881. int ret;
  2882. memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
  2883. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
2884. /* For a well-behaved ULP the state will be RDMA_CM_IDLE */
  2885. ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
  2886. if (ret)
  2887. goto err_dst;
  2888. if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
  2889. RDMA_CM_ADDR_QUERY))) {
  2890. ret = -EINVAL;
  2891. goto err_dst;
  2892. }
  2893. }
  2894. if (cma_family(id_priv) != dst_addr->sa_family) {
  2895. ret = -EINVAL;
  2896. goto err_state;
  2897. }
  2898. return 0;
  2899. err_state:
  2900. cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
  2901. err_dst:
  2902. memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
  2903. return ret;
  2904. }
  2905. int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
  2906. const struct sockaddr *dst_addr, unsigned long timeout_ms)
  2907. {
  2908. struct rdma_id_private *id_priv =
  2909. container_of(id, struct rdma_id_private, id);
  2910. int ret;
  2911. ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
  2912. if (ret)
  2913. return ret;
  2914. if (cma_any_addr(dst_addr)) {
  2915. ret = cma_resolve_loopback(id_priv);
  2916. } else {
  2917. if (dst_addr->sa_family == AF_IB) {
  2918. ret = cma_resolve_ib_addr(id_priv);
  2919. } else {
  2920. ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
  2921. &id->route.addr.dev_addr,
  2922. timeout_ms, addr_handler,
  2923. false, id_priv);
  2924. }
  2925. }
  2926. if (ret)
  2927. goto err;
  2928. return 0;
  2929. err:
  2930. cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
  2931. return ret;
  2932. }
  2933. EXPORT_SYMBOL(rdma_resolve_addr);
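/*
 * Illustrative active-side call sequence (sketch only; error handling and
 * the ULP's event handler are omitted):
 *
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);
 *	... handler receives RDMA_CM_EVENT_ADDR_RESOLVED ...
 *	rdma_resolve_route(id, 2000);
 *	... handler receives RDMA_CM_EVENT_ROUTE_RESOLVED ...
 *	rdma_connect_locked(id, &conn_param);	callable from within the handler
 */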
  2934. int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
  2935. {
  2936. struct rdma_id_private *id_priv;
  2937. unsigned long flags;
  2938. int ret;
  2939. id_priv = container_of(id, struct rdma_id_private, id);
  2940. spin_lock_irqsave(&id_priv->lock, flags);
  2941. if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
  2942. id_priv->state == RDMA_CM_IDLE) {
  2943. id_priv->reuseaddr = reuse;
  2944. ret = 0;
  2945. } else {
  2946. ret = -EINVAL;
  2947. }
  2948. spin_unlock_irqrestore(&id_priv->lock, flags);
  2949. return ret;
  2950. }
  2951. EXPORT_SYMBOL(rdma_set_reuseaddr);
  2952. int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
  2953. {
  2954. struct rdma_id_private *id_priv;
  2955. unsigned long flags;
  2956. int ret;
  2957. id_priv = container_of(id, struct rdma_id_private, id);
  2958. spin_lock_irqsave(&id_priv->lock, flags);
  2959. if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
  2960. id_priv->options |= (1 << CMA_OPTION_AFONLY);
  2961. id_priv->afonly = afonly;
  2962. ret = 0;
  2963. } else {
  2964. ret = -EINVAL;
  2965. }
  2966. spin_unlock_irqrestore(&id_priv->lock, flags);
  2967. return ret;
  2968. }
  2969. EXPORT_SYMBOL(rdma_set_afonly);
  2970. static void cma_bind_port(struct rdma_bind_list *bind_list,
  2971. struct rdma_id_private *id_priv)
  2972. {
  2973. struct sockaddr *addr;
  2974. struct sockaddr_ib *sib;
  2975. u64 sid, mask;
  2976. __be16 port;
  2977. lockdep_assert_held(&lock);
  2978. addr = cma_src_addr(id_priv);
  2979. port = htons(bind_list->port);
  2980. switch (addr->sa_family) {
  2981. case AF_INET:
  2982. ((struct sockaddr_in *) addr)->sin_port = port;
  2983. break;
  2984. case AF_INET6:
  2985. ((struct sockaddr_in6 *) addr)->sin6_port = port;
  2986. break;
  2987. case AF_IB:
  2988. sib = (struct sockaddr_ib *) addr;
  2989. sid = be64_to_cpu(sib->sib_sid);
  2990. mask = be64_to_cpu(sib->sib_sid_mask);
  2991. sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
  2992. sib->sib_sid_mask = cpu_to_be64(~0ULL);
  2993. break;
  2994. }
  2995. id_priv->bind_list = bind_list;
  2996. hlist_add_head(&id_priv->node, &bind_list->owners);
  2997. }
  2998. static int cma_alloc_port(enum rdma_ucm_port_space ps,
  2999. struct rdma_id_private *id_priv, unsigned short snum)
  3000. {
  3001. struct rdma_bind_list *bind_list;
  3002. int ret;
  3003. lockdep_assert_held(&lock);
  3004. bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
  3005. if (!bind_list)
  3006. return -ENOMEM;
  3007. ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
  3008. snum);
  3009. if (ret < 0)
  3010. goto err;
  3011. bind_list->ps = ps;
  3012. bind_list->port = snum;
  3013. cma_bind_port(bind_list, id_priv);
  3014. return 0;
  3015. err:
  3016. kfree(bind_list);
  3017. return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
  3018. }
  3019. static int cma_port_is_unique(struct rdma_bind_list *bind_list,
  3020. struct rdma_id_private *id_priv)
  3021. {
  3022. struct rdma_id_private *cur_id;
  3023. struct sockaddr *daddr = cma_dst_addr(id_priv);
  3024. struct sockaddr *saddr = cma_src_addr(id_priv);
  3025. __be16 dport = cma_port(daddr);
  3026. lockdep_assert_held(&lock);
  3027. hlist_for_each_entry(cur_id, &bind_list->owners, node) {
  3028. struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
  3029. struct sockaddr *cur_saddr = cma_src_addr(cur_id);
  3030. __be16 cur_dport = cma_port(cur_daddr);
  3031. if (id_priv == cur_id)
  3032. continue;
  3033. /* different dest port -> unique */
  3034. if (!cma_any_port(daddr) &&
  3035. !cma_any_port(cur_daddr) &&
  3036. (dport != cur_dport))
  3037. continue;
  3038. /* different src address -> unique */
  3039. if (!cma_any_addr(saddr) &&
  3040. !cma_any_addr(cur_saddr) &&
  3041. cma_addr_cmp(saddr, cur_saddr))
  3042. continue;
  3043. /* different dst address -> unique */
  3044. if (!cma_any_addr(daddr) &&
  3045. !cma_any_addr(cur_daddr) &&
  3046. cma_addr_cmp(daddr, cur_daddr))
  3047. continue;
  3048. return -EADDRNOTAVAIL;
  3049. }
  3050. return 0;
  3051. }
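/*
 * Pick an ephemeral port: start from a random offset within the local port
 * range and walk it until a port is found that is either unused or can be
 * shared (see cma_port_is_unique()), remembering the last port handed out
 * so it is not immediately reused.
 */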
  3052. static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
  3053. struct rdma_id_private *id_priv)
  3054. {
  3055. static unsigned int last_used_port;
  3056. int low, high, remaining;
  3057. unsigned int rover;
  3058. struct net *net = id_priv->id.route.addr.dev_addr.net;
  3059. lockdep_assert_held(&lock);
  3060. inet_get_local_port_range(net, &low, &high);
  3061. remaining = (high - low) + 1;
  3062. rover = prandom_u32() % remaining + low;
  3063. retry:
  3064. if (last_used_port != rover) {
  3065. struct rdma_bind_list *bind_list;
  3066. int ret;
  3067. bind_list = cma_ps_find(net, ps, (unsigned short)rover);
  3068. if (!bind_list) {
  3069. ret = cma_alloc_port(ps, id_priv, rover);
  3070. } else {
  3071. ret = cma_port_is_unique(bind_list, id_priv);
  3072. if (!ret)
  3073. cma_bind_port(bind_list, id_priv);
  3074. }
  3075. /*
3076. * Remember the previously used port number in order to avoid
3077. * re-using the same port immediately after it is closed.
  3078. */
  3079. if (!ret)
  3080. last_used_port = rover;
  3081. if (ret != -EADDRNOTAVAIL)
  3082. return ret;
  3083. }
  3084. if (--remaining) {
  3085. rover++;
  3086. if ((rover < low) || (rover > high))
  3087. rover = low;
  3088. goto retry;
  3089. }
  3090. return -EADDRNOTAVAIL;
  3091. }
  3092. /*
  3093. * Check that the requested port is available. This is called when trying to
  3094. * bind to a specific port, or when trying to listen on a bound port. In
  3095. * the latter case, the provided id_priv may already be on the bind_list, but
  3096. * we still need to check that it's okay to start listening.
  3097. */
  3098. static int cma_check_port(struct rdma_bind_list *bind_list,
  3099. struct rdma_id_private *id_priv, uint8_t reuseaddr)
  3100. {
  3101. struct rdma_id_private *cur_id;
  3102. struct sockaddr *addr, *cur_addr;
  3103. lockdep_assert_held(&lock);
  3104. addr = cma_src_addr(id_priv);
  3105. hlist_for_each_entry(cur_id, &bind_list->owners, node) {
  3106. if (id_priv == cur_id)
  3107. continue;
  3108. if (reuseaddr && cur_id->reuseaddr)
  3109. continue;
  3110. cur_addr = cma_src_addr(cur_id);
  3111. if (id_priv->afonly && cur_id->afonly &&
  3112. (addr->sa_family != cur_addr->sa_family))
  3113. continue;
  3114. if (cma_any_addr(addr) || cma_any_addr(cur_addr))
  3115. return -EADDRNOTAVAIL;
  3116. if (!cma_addr_cmp(addr, cur_addr))
  3117. return -EADDRINUSE;
  3118. }
  3119. return 0;
  3120. }
  3121. static int cma_use_port(enum rdma_ucm_port_space ps,
  3122. struct rdma_id_private *id_priv)
  3123. {
  3124. struct rdma_bind_list *bind_list;
  3125. unsigned short snum;
  3126. int ret;
  3127. lockdep_assert_held(&lock);
  3128. snum = ntohs(cma_port(cma_src_addr(id_priv)));
  3129. if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
  3130. return -EACCES;
  3131. bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
  3132. if (!bind_list) {
  3133. ret = cma_alloc_port(ps, id_priv, snum);
  3134. } else {
  3135. ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
  3136. if (!ret)
  3137. cma_bind_port(bind_list, id_priv);
  3138. }
  3139. return ret;
  3140. }
  3141. static enum rdma_ucm_port_space
  3142. cma_select_inet_ps(struct rdma_id_private *id_priv)
  3143. {
  3144. switch (id_priv->id.ps) {
  3145. case RDMA_PS_TCP:
  3146. case RDMA_PS_UDP:
  3147. case RDMA_PS_IPOIB:
  3148. case RDMA_PS_IB:
  3149. return id_priv->id.ps;
  3150. default:
  3151. return 0;
  3152. }
  3153. }
  3154. static enum rdma_ucm_port_space
  3155. cma_select_ib_ps(struct rdma_id_private *id_priv)
  3156. {
  3157. enum rdma_ucm_port_space ps = 0;
  3158. struct sockaddr_ib *sib;
  3159. u64 sid_ps, mask, sid;
  3160. sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
  3161. mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
  3162. sid = be64_to_cpu(sib->sib_sid) & mask;
  3163. if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
  3164. sid_ps = RDMA_IB_IP_PS_IB;
  3165. ps = RDMA_PS_IB;
  3166. } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
  3167. (sid == (RDMA_IB_IP_PS_TCP & mask))) {
  3168. sid_ps = RDMA_IB_IP_PS_TCP;
  3169. ps = RDMA_PS_TCP;
  3170. } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
  3171. (sid == (RDMA_IB_IP_PS_UDP & mask))) {
  3172. sid_ps = RDMA_IB_IP_PS_UDP;
  3173. ps = RDMA_PS_UDP;
  3174. }
  3175. if (ps) {
  3176. sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
  3177. sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
  3178. be64_to_cpu(sib->sib_sid_mask));
  3179. }
  3180. return ps;
  3181. }
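/*
 * Select the port space based on the address family (inet families map
 * directly, AF_IB derives it from the service ID), then bind to either a
 * caller-supplied port or an ephemeral one.
 */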
  3182. static int cma_get_port(struct rdma_id_private *id_priv)
  3183. {
  3184. enum rdma_ucm_port_space ps;
  3185. int ret;
  3186. if (cma_family(id_priv) != AF_IB)
  3187. ps = cma_select_inet_ps(id_priv);
  3188. else
  3189. ps = cma_select_ib_ps(id_priv);
  3190. if (!ps)
  3191. return -EPROTONOSUPPORT;
  3192. mutex_lock(&lock);
  3193. if (cma_any_port(cma_src_addr(id_priv)))
  3194. ret = cma_alloc_any_port(ps, id_priv);
  3195. else
  3196. ret = cma_use_port(ps, id_priv);
  3197. mutex_unlock(&lock);
  3198. return ret;
  3199. }
  3200. static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
  3201. struct sockaddr *addr)
  3202. {
  3203. #if IS_ENABLED(CONFIG_IPV6)
  3204. struct sockaddr_in6 *sin6;
  3205. if (addr->sa_family != AF_INET6)
  3206. return 0;
  3207. sin6 = (struct sockaddr_in6 *) addr;
  3208. if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
  3209. return 0;
  3210. if (!sin6->sin6_scope_id)
  3211. return -EINVAL;
  3212. dev_addr->bound_dev_if = sin6->sin6_scope_id;
  3213. #endif
  3214. return 0;
  3215. }
  3216. int rdma_listen(struct rdma_cm_id *id, int backlog)
  3217. {
  3218. struct rdma_id_private *id_priv =
  3219. container_of(id, struct rdma_id_private, id);
  3220. int ret;
  3221. if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
  3222. struct sockaddr_in any_in = {
  3223. .sin_family = AF_INET,
  3224. .sin_addr.s_addr = htonl(INADDR_ANY),
  3225. };
3226. /* For a well-behaved ULP the state will be RDMA_CM_IDLE */
  3227. ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
  3228. if (ret)
  3229. return ret;
  3230. if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
  3231. RDMA_CM_LISTEN)))
  3232. return -EINVAL;
  3233. }
  3234. /*
3235. * Once the ID reaches RDMA_CM_LISTEN it is no longer allowed to be
3236. * reusable and must be unique in the bind list.
  3237. */
  3238. if (id_priv->reuseaddr) {
  3239. mutex_lock(&lock);
  3240. ret = cma_check_port(id_priv->bind_list, id_priv, 0);
  3241. if (!ret)
  3242. id_priv->reuseaddr = 0;
  3243. mutex_unlock(&lock);
  3244. if (ret)
  3245. goto err;
  3246. }
  3247. id_priv->backlog = backlog;
  3248. if (id_priv->cma_dev) {
  3249. if (rdma_cap_ib_cm(id->device, 1)) {
  3250. ret = cma_ib_listen(id_priv);
  3251. if (ret)
  3252. goto err;
  3253. } else if (rdma_cap_iw_cm(id->device, 1)) {
  3254. ret = cma_iw_listen(id_priv, backlog);
  3255. if (ret)
  3256. goto err;
  3257. } else {
  3258. ret = -ENOSYS;
  3259. goto err;
  3260. }
  3261. } else {
  3262. ret = cma_listen_on_all(id_priv);
  3263. if (ret)
  3264. goto err;
  3265. }
  3266. return 0;
  3267. err:
  3268. id_priv->backlog = 0;
  3269. /*
3270. * None of the failure paths that lead here will have allowed the
3271. * req_handlers to run.
  3272. */
  3273. cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
  3274. return ret;
  3275. }
  3276. EXPORT_SYMBOL(rdma_listen);
  3277. int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
  3278. {
  3279. struct rdma_id_private *id_priv;
  3280. int ret;
  3281. struct sockaddr *daddr;
  3282. if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
  3283. addr->sa_family != AF_IB)
  3284. return -EAFNOSUPPORT;
  3285. id_priv = container_of(id, struct rdma_id_private, id);
  3286. if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
  3287. return -EINVAL;
  3288. ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
  3289. if (ret)
  3290. goto err1;
  3291. memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
  3292. if (!cma_any_addr(addr)) {
  3293. ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
  3294. if (ret)
  3295. goto err1;
  3296. ret = cma_acquire_dev_by_src_ip(id_priv);
  3297. if (ret)
  3298. goto err1;
  3299. }
  3300. if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
  3301. if (addr->sa_family == AF_INET)
  3302. id_priv->afonly = 1;
  3303. #if IS_ENABLED(CONFIG_IPV6)
  3304. else if (addr->sa_family == AF_INET6) {
  3305. struct net *net = id_priv->id.route.addr.dev_addr.net;
  3306. id_priv->afonly = net->ipv6.sysctl.bindv6only;
  3307. }
  3308. #endif
  3309. }
  3310. daddr = cma_dst_addr(id_priv);
  3311. daddr->sa_family = addr->sa_family;
  3312. ret = cma_get_port(id_priv);
  3313. if (ret)
  3314. goto err2;
  3315. if (!cma_any_addr(addr))
  3316. rdma_restrack_add(&id_priv->res);
  3317. return 0;
  3318. err2:
  3319. if (id_priv->cma_dev)
  3320. cma_release_dev(id_priv);
  3321. err1:
  3322. cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
  3323. return ret;
  3324. }
  3325. EXPORT_SYMBOL(rdma_bind_addr);
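/*
 * Build the CMA private-data header that is prepended to the ULP's private
 * data for IP-based addressing: version, IP version, source and destination
 * addresses, and the source port.
 */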
  3326. static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
  3327. {
  3328. struct cma_hdr *cma_hdr;
  3329. cma_hdr = hdr;
  3330. cma_hdr->cma_version = CMA_VERSION;
  3331. if (cma_family(id_priv) == AF_INET) {
  3332. struct sockaddr_in *src4, *dst4;
  3333. src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
  3334. dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
  3335. cma_set_ip_ver(cma_hdr, 4);
  3336. cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
  3337. cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
  3338. cma_hdr->port = src4->sin_port;
  3339. } else if (cma_family(id_priv) == AF_INET6) {
  3340. struct sockaddr_in6 *src6, *dst6;
  3341. src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
  3342. dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
  3343. cma_set_ip_ver(cma_hdr, 6);
  3344. cma_hdr->src_addr.ip6 = src6->sin6_addr;
  3345. cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
  3346. cma_hdr->port = src6->sin6_port;
  3347. }
  3348. return 0;
  3349. }
  3350. static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
  3351. const struct ib_cm_event *ib_event)
  3352. {
  3353. struct rdma_id_private *id_priv = cm_id->context;
  3354. struct rdma_cm_event event = {};
  3355. const struct ib_cm_sidr_rep_event_param *rep =
  3356. &ib_event->param.sidr_rep_rcvd;
  3357. int ret;
  3358. mutex_lock(&id_priv->handler_mutex);
  3359. if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
  3360. goto out;
  3361. switch (ib_event->event) {
  3362. case IB_CM_SIDR_REQ_ERROR:
  3363. event.event = RDMA_CM_EVENT_UNREACHABLE;
  3364. event.status = -ETIMEDOUT;
  3365. break;
  3366. case IB_CM_SIDR_REP_RECEIVED:
  3367. event.param.ud.private_data = ib_event->private_data;
  3368. event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
  3369. if (rep->status != IB_SIDR_SUCCESS) {
  3370. event.event = RDMA_CM_EVENT_UNREACHABLE;
  3371. event.status = ib_event->param.sidr_rep_rcvd.status;
  3372. pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
  3373. event.status);
  3374. break;
  3375. }
  3376. ret = cma_set_qkey(id_priv, rep->qkey);
  3377. if (ret) {
  3378. pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
  3379. event.event = RDMA_CM_EVENT_ADDR_ERROR;
  3380. event.status = ret;
  3381. break;
  3382. }
  3383. ib_init_ah_attr_from_path(id_priv->id.device,
  3384. id_priv->id.port_num,
  3385. id_priv->id.route.path_rec,
  3386. &event.param.ud.ah_attr,
  3387. rep->sgid_attr);
  3388. event.param.ud.qp_num = rep->qpn;
  3389. event.param.ud.qkey = rep->qkey;
  3390. event.event = RDMA_CM_EVENT_ESTABLISHED;
  3391. event.status = 0;
  3392. break;
  3393. default:
  3394. pr_err("RDMA CMA: unexpected IB CM event: %d\n",
  3395. ib_event->event);
  3396. goto out;
  3397. }
  3398. ret = cma_cm_event_handler(id_priv, &event);
  3399. rdma_destroy_ah_attr(&event.param.ud.ah_attr);
  3400. if (ret) {
  3401. /* Destroy the CM ID by returning a non-zero value. */
  3402. id_priv->cm_id.ib = NULL;
  3403. destroy_id_handler_unlock(id_priv);
  3404. return ret;
  3405. }
  3406. out:
  3407. mutex_unlock(&id_priv->handler_mutex);
  3408. return 0;
  3409. }
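/*
 * Issue an IB CM SIDR request for UD (unconnected) QPs: the CMA header and
 * the ULP's private data are carried in the request, and the reply is
 * handled by cma_sidr_rep_handler() above.
 */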
  3410. static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
  3411. struct rdma_conn_param *conn_param)
  3412. {
  3413. struct ib_cm_sidr_req_param req;
  3414. struct ib_cm_id *id;
  3415. void *private_data;
  3416. u8 offset;
  3417. int ret;
  3418. memset(&req, 0, sizeof req);
  3419. offset = cma_user_data_offset(id_priv);
  3420. req.private_data_len = offset + conn_param->private_data_len;
  3421. if (req.private_data_len < conn_param->private_data_len)
  3422. return -EINVAL;
  3423. if (req.private_data_len) {
  3424. private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
  3425. if (!private_data)
  3426. return -ENOMEM;
  3427. } else {
  3428. private_data = NULL;
  3429. }
  3430. if (conn_param->private_data && conn_param->private_data_len)
  3431. memcpy(private_data + offset, conn_param->private_data,
  3432. conn_param->private_data_len);
  3433. if (private_data) {
  3434. ret = cma_format_hdr(private_data, id_priv);
  3435. if (ret)
  3436. goto out;
  3437. req.private_data = private_data;
  3438. }
  3439. id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
  3440. id_priv);
  3441. if (IS_ERR(id)) {
  3442. ret = PTR_ERR(id);
  3443. goto out;
  3444. }
  3445. id_priv->cm_id.ib = id;
  3446. req.path = id_priv->id.route.path_rec;
  3447. req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
  3448. req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  3449. req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
  3450. req.max_cm_retries = CMA_MAX_CM_RETRIES;
  3451. trace_cm_send_sidr_req(id_priv);
  3452. ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
  3453. if (ret) {
  3454. ib_destroy_cm_id(id_priv->cm_id.ib);
  3455. id_priv->cm_id.ib = NULL;
  3456. }
  3457. out:
  3458. kfree(private_data);
  3459. return ret;
  3460. }
  3461. static int cma_connect_ib(struct rdma_id_private *id_priv,
  3462. struct rdma_conn_param *conn_param)
  3463. {
  3464. struct ib_cm_req_param req;
  3465. struct rdma_route *route;
  3466. void *private_data;
  3467. struct ib_cm_id *id;
  3468. u8 offset;
  3469. int ret;
  3470. memset(&req, 0, sizeof req);
  3471. offset = cma_user_data_offset(id_priv);
  3472. req.private_data_len = offset + conn_param->private_data_len;
  3473. if (req.private_data_len < conn_param->private_data_len)
  3474. return -EINVAL;
  3475. if (req.private_data_len) {
  3476. private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
  3477. if (!private_data)
  3478. return -ENOMEM;
  3479. } else {
  3480. private_data = NULL;
  3481. }
  3482. if (conn_param->private_data && conn_param->private_data_len)
  3483. memcpy(private_data + offset, conn_param->private_data,
  3484. conn_param->private_data_len);
  3485. id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
  3486. if (IS_ERR(id)) {
  3487. ret = PTR_ERR(id);
  3488. goto out;
  3489. }
  3490. id_priv->cm_id.ib = id;
  3491. route = &id_priv->id.route;
  3492. if (private_data) {
  3493. ret = cma_format_hdr(private_data, id_priv);
  3494. if (ret)
  3495. goto out;
  3496. req.private_data = private_data;
  3497. }
  3498. req.primary_path = &route->path_rec[0];
  3499. if (route->num_paths == 2)
  3500. req.alternate_path = &route->path_rec[1];
  3501. req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
  3502. /* Alternate path SGID attribute currently unsupported */
  3503. req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
  3504. req.qp_num = id_priv->qp_num;
  3505. req.qp_type = id_priv->id.qp_type;
  3506. req.starting_psn = id_priv->seq_num;
  3507. req.responder_resources = conn_param->responder_resources;
  3508. req.initiator_depth = conn_param->initiator_depth;
  3509. req.flow_control = conn_param->flow_control;
  3510. req.retry_count = min_t(u8, 7, conn_param->retry_count);
  3511. req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
  3512. req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
  3513. req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
  3514. req.max_cm_retries = CMA_MAX_CM_RETRIES;
  3515. req.srq = id_priv->srq ? 1 : 0;
  3516. req.ece.vendor_id = id_priv->ece.vendor_id;
  3517. req.ece.attr_mod = id_priv->ece.attr_mod;
  3518. trace_cm_send_req(id_priv);
  3519. ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
  3520. out:
  3521. if (ret && !IS_ERR(id)) {
  3522. ib_destroy_cm_id(id);
  3523. id_priv->cm_id.ib = NULL;
  3524. }
  3525. kfree(private_data);
  3526. return ret;
  3527. }
  3528. static int cma_connect_iw(struct rdma_id_private *id_priv,
  3529. struct rdma_conn_param *conn_param)
  3530. {
  3531. struct iw_cm_id *cm_id;
  3532. int ret;
  3533. struct iw_cm_conn_param iw_param;
  3534. cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
  3535. if (IS_ERR(cm_id))
  3536. return PTR_ERR(cm_id);
  3537. mutex_lock(&id_priv->qp_mutex);
  3538. cm_id->tos = id_priv->tos;
  3539. cm_id->tos_set = id_priv->tos_set;
  3540. mutex_unlock(&id_priv->qp_mutex);
  3541. id_priv->cm_id.iw = cm_id;
  3542. memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
  3543. rdma_addr_size(cma_src_addr(id_priv)));
  3544. memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
  3545. rdma_addr_size(cma_dst_addr(id_priv)));
  3546. ret = cma_modify_qp_rtr(id_priv, conn_param);
  3547. if (ret)
  3548. goto out;
  3549. if (conn_param) {
  3550. iw_param.ord = conn_param->initiator_depth;
  3551. iw_param.ird = conn_param->responder_resources;
  3552. iw_param.private_data = conn_param->private_data;
  3553. iw_param.private_data_len = conn_param->private_data_len;
  3554. iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
  3555. } else {
  3556. memset(&iw_param, 0, sizeof iw_param);
  3557. iw_param.qpn = id_priv->qp_num;
  3558. }
  3559. ret = iw_cm_connect(cm_id, &iw_param);
  3560. out:
  3561. if (ret) {
  3562. iw_destroy_cm_id(cm_id);
  3563. id_priv->cm_id.iw = NULL;
  3564. }
  3565. return ret;
  3566. }
  3567. /**
  3568. * rdma_connect_locked - Initiate an active connection request.
  3569. * @id: Connection identifier to connect.
  3570. * @conn_param: Connection information used for connected QPs.
  3571. *
  3572. * Same as rdma_connect() but can only be called from the
  3573. * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
  3574. */
  3575. int rdma_connect_locked(struct rdma_cm_id *id,
  3576. struct rdma_conn_param *conn_param)
  3577. {
  3578. struct rdma_id_private *id_priv =
  3579. container_of(id, struct rdma_id_private, id);
  3580. int ret;
  3581. if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
  3582. return -EINVAL;
  3583. if (!id->qp) {
  3584. id_priv->qp_num = conn_param->qp_num;
  3585. id_priv->srq = conn_param->srq;
  3586. }
  3587. if (rdma_cap_ib_cm(id->device, id->port_num)) {
  3588. if (id->qp_type == IB_QPT_UD)
  3589. ret = cma_resolve_ib_udp(id_priv, conn_param);
  3590. else
  3591. ret = cma_connect_ib(id_priv, conn_param);
  3592. } else if (rdma_cap_iw_cm(id->device, id->port_num))
  3593. ret = cma_connect_iw(id_priv, conn_param);
  3594. else
  3595. ret = -ENOSYS;
  3596. if (ret)
  3597. goto err_state;
  3598. return 0;
  3599. err_state:
  3600. cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
  3601. return ret;
  3602. }
  3603. EXPORT_SYMBOL(rdma_connect_locked);
  3604. /**
  3605. * rdma_connect - Initiate an active connection request.
  3606. * @id: Connection identifier to connect.
  3607. * @conn_param: Connection information used for connected QPs.
  3608. *
  3609. * Users must have resolved a route for the rdma_cm_id to connect with by having
  3610. * called rdma_resolve_route before calling this routine.
  3611. *
  3612. * This call will either connect to a remote QP or obtain remote QP information
  3613. * for unconnected rdma_cm_id's. The actual operation is based on the
  3614. * rdma_cm_id's port space.
  3615. */
  3616. int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
  3617. {
  3618. struct rdma_id_private *id_priv =
  3619. container_of(id, struct rdma_id_private, id);
  3620. int ret;
  3621. mutex_lock(&id_priv->handler_mutex);
  3622. ret = rdma_connect_locked(id, conn_param);
  3623. mutex_unlock(&id_priv->handler_mutex);
  3624. return ret;
  3625. }
  3626. EXPORT_SYMBOL(rdma_connect);
  3627. /**
  3628. * rdma_connect_ece - Initiate an active connection request with ECE data.
  3629. * @id: Connection identifier to connect.
  3630. * @conn_param: Connection information used for connected QPs.
  3631. * @ece: ECE parameters
  3632. *
  3633. * See rdma_connect() explanation.
  3634. */
  3635. int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
  3636. struct rdma_ucm_ece *ece)
  3637. {
  3638. struct rdma_id_private *id_priv =
  3639. container_of(id, struct rdma_id_private, id);
  3640. id_priv->ece.vendor_id = ece->vendor_id;
  3641. id_priv->ece.attr_mod = ece->attr_mod;
  3642. return rdma_connect(id, conn_param);
  3643. }
  3644. EXPORT_SYMBOL(rdma_connect_ece);
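/*
 * Accept an IB CM connection request: transition the QP to RTR and RTS
 * (when the CMA manages the QP), then send the CM REP carrying the ULP's
 * private data and connection parameters.
 */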
  3645. static int cma_accept_ib(struct rdma_id_private *id_priv,
  3646. struct rdma_conn_param *conn_param)
  3647. {
  3648. struct ib_cm_rep_param rep;
  3649. int ret;
  3650. ret = cma_modify_qp_rtr(id_priv, conn_param);
  3651. if (ret)
  3652. goto out;
  3653. ret = cma_modify_qp_rts(id_priv, conn_param);
  3654. if (ret)
  3655. goto out;
  3656. memset(&rep, 0, sizeof rep);
  3657. rep.qp_num = id_priv->qp_num;
  3658. rep.starting_psn = id_priv->seq_num;
  3659. rep.private_data = conn_param->private_data;
  3660. rep.private_data_len = conn_param->private_data_len;
  3661. rep.responder_resources = conn_param->responder_resources;
  3662. rep.initiator_depth = conn_param->initiator_depth;
  3663. rep.failover_accepted = 0;
  3664. rep.flow_control = conn_param->flow_control;
  3665. rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
  3666. rep.srq = id_priv->srq ? 1 : 0;
  3667. rep.ece.vendor_id = id_priv->ece.vendor_id;
  3668. rep.ece.attr_mod = id_priv->ece.attr_mod;
  3669. trace_cm_send_rep(id_priv);
  3670. ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
  3671. out:
  3672. return ret;
  3673. }
  3674. static int cma_accept_iw(struct rdma_id_private *id_priv,
  3675. struct rdma_conn_param *conn_param)
  3676. {
  3677. struct iw_cm_conn_param iw_param;
  3678. int ret;
  3679. if (!conn_param)
  3680. return -EINVAL;
  3681. ret = cma_modify_qp_rtr(id_priv, conn_param);
  3682. if (ret)
  3683. return ret;
  3684. iw_param.ord = conn_param->initiator_depth;
  3685. iw_param.ird = conn_param->responder_resources;
  3686. iw_param.private_data = conn_param->private_data;
  3687. iw_param.private_data_len = conn_param->private_data_len;
  3688. if (id_priv->id.qp) {
  3689. iw_param.qpn = id_priv->qp_num;
  3690. } else
  3691. iw_param.qpn = conn_param->qp_num;
  3692. return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
  3693. }
  3694. static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
  3695. enum ib_cm_sidr_status status, u32 qkey,
  3696. const void *private_data, int private_data_len)
  3697. {
  3698. struct ib_cm_sidr_rep_param rep;
  3699. int ret;
  3700. memset(&rep, 0, sizeof rep);
  3701. rep.status = status;
  3702. if (status == IB_SIDR_SUCCESS) {
  3703. ret = cma_set_qkey(id_priv, qkey);
  3704. if (ret)
  3705. return ret;
  3706. rep.qp_num = id_priv->qp_num;
  3707. rep.qkey = id_priv->qkey;
  3708. rep.ece.vendor_id = id_priv->ece.vendor_id;
  3709. rep.ece.attr_mod = id_priv->ece.attr_mod;
  3710. }
  3711. rep.private_data = private_data;
  3712. rep.private_data_len = private_data_len;
  3713. trace_cm_send_sidr_rep(id_priv);
  3714. return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
  3715. }
  3716. /**
  3717. * rdma_accept - Called to accept a connection request or response.
  3718. * @id: Connection identifier associated with the request.
  3719. * @conn_param: Information needed to establish the connection. This must be
  3720. * provided if accepting a connection request. If accepting a connection
  3721. * response, this parameter must be NULL.
  3722. *
  3723. * Typically, this routine is only called by the listener to accept a connection
  3724. * request. It must also be called on the active side of a connection if the
  3725. * user is performing their own QP transitions.
  3726. *
  3727. * In the case of error, a reject message is sent to the remote side and the
  3728. * state of the qp associated with the id is modified to error, such that any
  3729. * previously posted receive buffers would be flushed.
  3730. *
  3731. * This function is for use by kernel ULPs and must be called from under the
  3732. * handler callback.
  3733. */
  3734. int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
  3735. {
  3736. struct rdma_id_private *id_priv =
  3737. container_of(id, struct rdma_id_private, id);
  3738. int ret;
  3739. lockdep_assert_held(&id_priv->handler_mutex);
  3740. if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
  3741. return -EINVAL;
  3742. if (!id->qp && conn_param) {
  3743. id_priv->qp_num = conn_param->qp_num;
  3744. id_priv->srq = conn_param->srq;
  3745. }
  3746. if (rdma_cap_ib_cm(id->device, id->port_num)) {
  3747. if (id->qp_type == IB_QPT_UD) {
  3748. if (conn_param)
  3749. ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
  3750. conn_param->qkey,
  3751. conn_param->private_data,
  3752. conn_param->private_data_len);
  3753. else
  3754. ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
  3755. 0, NULL, 0);
  3756. } else {
  3757. if (conn_param)
  3758. ret = cma_accept_ib(id_priv, conn_param);
  3759. else
  3760. ret = cma_rep_recv(id_priv);
  3761. }
  3762. } else if (rdma_cap_iw_cm(id->device, id->port_num))
  3763. ret = cma_accept_iw(id_priv, conn_param);
  3764. else
  3765. ret = -ENOSYS;
  3766. if (ret)
  3767. goto reject;
  3768. return 0;
  3769. reject:
  3770. cma_modify_qp_err(id_priv);
  3771. rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
  3772. return ret;
  3773. }
  3774. EXPORT_SYMBOL(rdma_accept);
  3775. int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
  3776. struct rdma_ucm_ece *ece)
  3777. {
  3778. struct rdma_id_private *id_priv =
  3779. container_of(id, struct rdma_id_private, id);
  3780. id_priv->ece.vendor_id = ece->vendor_id;
  3781. id_priv->ece.attr_mod = ece->attr_mod;
  3782. return rdma_accept(id, conn_param);
  3783. }
  3784. EXPORT_SYMBOL(rdma_accept_ece);
  3785. void rdma_lock_handler(struct rdma_cm_id *id)
  3786. {
  3787. struct rdma_id_private *id_priv =
  3788. container_of(id, struct rdma_id_private, id);
  3789. mutex_lock(&id_priv->handler_mutex);
  3790. }
  3791. EXPORT_SYMBOL(rdma_lock_handler);
  3792. void rdma_unlock_handler(struct rdma_cm_id *id)
  3793. {
  3794. struct rdma_id_private *id_priv =
  3795. container_of(id, struct rdma_id_private, id);
  3796. mutex_unlock(&id_priv->handler_mutex);
  3797. }
  3798. EXPORT_SYMBOL(rdma_unlock_handler);
  3799. int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
  3800. {
  3801. struct rdma_id_private *id_priv;
  3802. int ret;
  3803. id_priv = container_of(id, struct rdma_id_private, id);
  3804. if (!id_priv->cm_id.ib)
  3805. return -EINVAL;
  3806. switch (id->device->node_type) {
  3807. case RDMA_NODE_IB_CA:
  3808. ret = ib_cm_notify(id_priv->cm_id.ib, event);
  3809. break;
  3810. default:
  3811. ret = 0;
  3812. break;
  3813. }
  3814. return ret;
  3815. }
  3816. EXPORT_SYMBOL(rdma_notify);
  3817. int rdma_reject(struct rdma_cm_id *id, const void *private_data,
  3818. u8 private_data_len, u8 reason)
  3819. {
  3820. struct rdma_id_private *id_priv;
  3821. int ret;
  3822. id_priv = container_of(id, struct rdma_id_private, id);
  3823. if (!id_priv->cm_id.ib)
  3824. return -EINVAL;
  3825. if (rdma_cap_ib_cm(id->device, id->port_num)) {
  3826. if (id->qp_type == IB_QPT_UD) {
  3827. ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
  3828. private_data, private_data_len);
  3829. } else {
  3830. trace_cm_send_rej(id_priv);
  3831. ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
  3832. private_data, private_data_len);
  3833. }
  3834. } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
  3835. ret = iw_cm_reject(id_priv->cm_id.iw,
  3836. private_data, private_data_len);
  3837. } else
  3838. ret = -ENOSYS;
  3839. return ret;
  3840. }
  3841. EXPORT_SYMBOL(rdma_reject);
  3842. int rdma_disconnect(struct rdma_cm_id *id)
  3843. {
  3844. struct rdma_id_private *id_priv;
  3845. int ret;
  3846. id_priv = container_of(id, struct rdma_id_private, id);
  3847. if (!id_priv->cm_id.ib)
  3848. return -EINVAL;
  3849. if (rdma_cap_ib_cm(id->device, id->port_num)) {
  3850. ret = cma_modify_qp_err(id_priv);
  3851. if (ret)
  3852. goto out;
  3853. /* Initiate or respond to a disconnect. */
  3854. trace_cm_disconnect(id_priv);
  3855. if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
  3856. if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
  3857. trace_cm_sent_drep(id_priv);
  3858. } else {
  3859. trace_cm_sent_dreq(id_priv);
  3860. }
  3861. } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
  3862. ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
  3863. } else
  3864. ret = -EINVAL;
  3865. out:
  3866. return ret;
  3867. }
  3868. EXPORT_SYMBOL(rdma_disconnect);
  3869. static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
  3870. struct ib_sa_multicast *multicast,
  3871. struct rdma_cm_event *event,
  3872. struct cma_multicast *mc)
  3873. {
  3874. struct rdma_dev_addr *dev_addr;
  3875. enum ib_gid_type gid_type;
  3876. struct net_device *ndev;
  3877. if (!status)
  3878. status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
  3879. else
  3880. pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
  3881. status);
  3882. event->status = status;
  3883. event->param.ud.private_data = mc->context;
  3884. if (status) {
  3885. event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
  3886. return;
  3887. }
  3888. dev_addr = &id_priv->id.route.addr.dev_addr;
  3889. ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
  3890. gid_type =
  3891. id_priv->cma_dev
  3892. ->default_gid_type[id_priv->id.port_num -
  3893. rdma_start_port(
  3894. id_priv->cma_dev->device)];
  3895. event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
  3896. if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
  3897. &multicast->rec, ndev, gid_type,
  3898. &event->param.ud.ah_attr)) {
  3899. event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
  3900. goto out;
  3901. }
  3902. event->param.ud.qp_num = 0xFFFFFF;
  3903. event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
  3904. out:
  3905. if (ndev)
  3906. dev_put(ndev);
  3907. }
  3908. static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
  3909. {
  3910. struct cma_multicast *mc = multicast->context;
  3911. struct rdma_id_private *id_priv = mc->id_priv;
  3912. struct rdma_cm_event event = {};
  3913. int ret = 0;
  3914. mutex_lock(&id_priv->handler_mutex);
  3915. if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
  3916. READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
  3917. goto out;
  3918. cma_make_mc_event(status, id_priv, multicast, &event, mc);
  3919. ret = cma_cm_event_handler(id_priv, &event);
  3920. rdma_destroy_ah_attr(&event.param.ud.ah_attr);
  3921. WARN_ON(ret);
  3922. out:
  3923. mutex_unlock(&id_priv->handler_mutex);
  3924. return 0;
  3925. }
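/*
 * Derive the multicast GID from the requested address: a wildcard address
 * maps to the zero MGID, an IPv6 address in the SA-assigned range (or an
 * AF_IB address) is used directly, and other IPv4/IPv6 addresses go through
 * the standard IP-over-IB multicast mapping, with byte 7 flagged with the
 * RDMA CM signature for the RDMA_PS_UDP port space.
 */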
  3926. static void cma_set_mgid(struct rdma_id_private *id_priv,
  3927. struct sockaddr *addr, union ib_gid *mgid)
  3928. {
  3929. unsigned char mc_map[MAX_ADDR_LEN];
  3930. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  3931. struct sockaddr_in *sin = (struct sockaddr_in *) addr;
  3932. struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
  3933. if (cma_any_addr(addr)) {
  3934. memset(mgid, 0, sizeof *mgid);
  3935. } else if ((addr->sa_family == AF_INET6) &&
  3936. ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
  3937. 0xFF10A01B)) {
3938. /* IPv6 address is an SA-assigned MGID. */
  3939. memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
  3940. } else if (addr->sa_family == AF_IB) {
  3941. memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
  3942. } else if (addr->sa_family == AF_INET6) {
  3943. ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
  3944. if (id_priv->id.ps == RDMA_PS_UDP)
  3945. mc_map[7] = 0x01; /* Use RDMA CM signature */
  3946. *mgid = *(union ib_gid *) (mc_map + 4);
  3947. } else {
  3948. ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
  3949. if (id_priv->id.ps == RDMA_PS_UDP)
  3950. mc_map[7] = 0x01; /* Use RDMA CM signature */
  3951. *mgid = *(union ib_gid *) (mc_map + 4);
  3952. }
  3953. }
  3954. static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
  3955. struct cma_multicast *mc)
  3956. {
  3957. struct ib_sa_mcmember_rec rec;
  3958. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  3959. ib_sa_comp_mask comp_mask;
  3960. int ret;
  3961. ib_addr_get_mgid(dev_addr, &rec.mgid);
  3962. ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
  3963. &rec.mgid, &rec);
  3964. if (ret)
  3965. return ret;
  3966. ret = cma_set_qkey(id_priv, 0);
  3967. if (ret)
  3968. return ret;
  3969. cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
  3970. rec.qkey = cpu_to_be32(id_priv->qkey);
  3971. rdma_addr_get_sgid(dev_addr, &rec.port_gid);
  3972. rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
  3973. rec.join_state = mc->join_state;
  3974. if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
  3975. (!ib_sa_sendonly_fullmem_support(&sa_client,
  3976. id_priv->id.device,
  3977. id_priv->id.port_num))) {
  3978. dev_warn(
  3979. &id_priv->id.device->dev,
  3980. "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
  3981. id_priv->id.port_num);
  3982. return -EOPNOTSUPP;
  3983. }
  3984. comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
  3985. IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
  3986. IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
  3987. IB_SA_MCMEMBER_REC_FLOW_LABEL |
  3988. IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
  3989. if (id_priv->id.ps == RDMA_PS_IPOIB)
  3990. comp_mask |= IB_SA_MCMEMBER_REC_RATE |
  3991. IB_SA_MCMEMBER_REC_RATE_SELECTOR |
  3992. IB_SA_MCMEMBER_REC_MTU_SELECTOR |
  3993. IB_SA_MCMEMBER_REC_MTU |
  3994. IB_SA_MCMEMBER_REC_HOP_LIMIT;
  3995. mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
  3996. id_priv->id.port_num, &rec, comp_mask,
  3997. GFP_KERNEL, cma_ib_mc_handler, mc);
  3998. return PTR_ERR_OR_ZERO(mc->sa_mc);
  3999. }
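/*
 * Build the MGID for a RoCE join: IPv6 addresses are used as the MGID
 * directly, while IPv4 addresses are encoded into the low bytes of the
 * MGID; for RoCE v2 (UDP encapsulation) the leading 0xff0e multicast
 * prefix is cleared.
 */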
  4000. static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
  4001. enum ib_gid_type gid_type)
  4002. {
  4003. struct sockaddr_in *sin = (struct sockaddr_in *)addr;
  4004. struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
  4005. if (cma_any_addr(addr)) {
  4006. memset(mgid, 0, sizeof *mgid);
  4007. } else if (addr->sa_family == AF_INET6) {
  4008. memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
  4009. } else {
  4010. mgid->raw[0] =
  4011. (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
  4012. mgid->raw[1] =
  4013. (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
  4014. mgid->raw[2] = 0;
  4015. mgid->raw[3] = 0;
  4016. mgid->raw[4] = 0;
  4017. mgid->raw[5] = 0;
  4018. mgid->raw[6] = 0;
  4019. mgid->raw[7] = 0;
  4020. mgid->raw[8] = 0;
  4021. mgid->raw[9] = 0;
  4022. mgid->raw[10] = 0xff;
  4023. mgid->raw[11] = 0xff;
  4024. *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
  4025. }
  4026. }
  4027. static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
  4028. struct cma_multicast *mc)
  4029. {
  4030. struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
  4031. int err = 0;
  4032. struct sockaddr *addr = (struct sockaddr *)&mc->addr;
  4033. struct net_device *ndev = NULL;
  4034. struct ib_sa_multicast ib;
  4035. enum ib_gid_type gid_type;
  4036. bool send_only;
  4037. send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
  4038. if (cma_zero_addr(addr))
  4039. return -EINVAL;
  4040. gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
  4041. rdma_start_port(id_priv->cma_dev->device)];
  4042. cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
  4043. ib.rec.pkey = cpu_to_be16(0xffff);
  4044. if (id_priv->id.ps == RDMA_PS_UDP)
  4045. ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
  4046. if (dev_addr->bound_dev_if)
  4047. ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
  4048. if (!ndev)
  4049. return -ENODEV;
  4050. ib.rec.rate = iboe_get_rate(ndev);
  4051. ib.rec.hop_limit = 1;
  4052. ib.rec.mtu = iboe_get_mtu(ndev->mtu);
  4053. if (addr->sa_family == AF_INET) {
  4054. if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
  4055. ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
  4056. if (!send_only) {
  4057. err = cma_igmp_send(ndev, &ib.rec.mgid,
  4058. true);
  4059. }
  4060. }
  4061. } else {
  4062. if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
  4063. err = -ENOTSUPP;
  4064. }
  4065. dev_put(ndev);
  4066. if (err || !ib.rec.mtu)
  4067. return err ?: -EINVAL;
  4068. rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
  4069. &ib.rec.port_gid);
  4070. INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
  4071. cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
  4072. queue_work(cma_wq, &mc->iboe_join.work);
  4073. return 0;
  4074. }
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
			u8 join_state, void *context)
{
	struct rdma_id_private *id_priv =
		container_of(id, struct rdma_id_private, id);
	struct cma_multicast *mc;
	int ret;

	/* Not supported for kernel QPs */
	if (WARN_ON(id->qp))
		return -EINVAL;

	/* ULP is calling this wrong. */
	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
		return -EINVAL;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return -ENOMEM;

	memcpy(&mc->addr, addr, rdma_addr_size(addr));
	mc->context = context;
	mc->id_priv = id_priv;
	mc->join_state = join_state;

	if (rdma_protocol_roce(id->device, id->port_num)) {
		ret = cma_iboe_join_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
		ret = cma_join_ib_multicast(id_priv, mc);
		if (ret)
			goto out_err;
	} else {
		ret = -ENOSYS;
		goto out_err;
	}

	spin_lock(&id_priv->lock);
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	return 0;
out_err:
	kfree(mc);
	return ret;
}
EXPORT_SYMBOL(rdma_join_multicast);
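
/*
 * rdma_leave_multicast() - detach a cm_id from a previously joined group.
 *
 * Looks up the matching cma_multicast entry by address, unlinks it from the
 * id's mc_list and destroys it; a no-op if no matching join exists.
 */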
void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	struct cma_multicast *mc;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irq(&id_priv->lock);
	list_for_each_entry(mc, &id_priv->mc_list, list) {
		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
			continue;
		list_del(&mc->list);
		spin_unlock_irq(&id_priv->lock);

		WARN_ON(id_priv->cma_dev->device != id->device);
		destroy_mc(id_priv, mc);
		return;
	}
	spin_unlock_irq(&id_priv->lock);
}
EXPORT_SYMBOL(rdma_leave_multicast);
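
/*
 * If a bonding failover has changed the hardware address behind the netdev
 * that this cm_id resolved its source address on, queue a work item that
 * delivers RDMA_CM_EVENT_ADDR_CHANGE so the ULP can re-resolve.
 */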
static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr;
	struct cma_work *work;

	dev_addr = &id_priv->id.route.addr.dev_addr;

	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
	    (net_eq(dev_net(ndev), dev_addr->net)) &&
	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
			ndev->name, &id_priv->id);
		work = kzalloc(sizeof *work, GFP_KERNEL);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, cma_work_handler);
		work->id = id_priv;
		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
		cma_id_get(id_priv);
		queue_work(cma_wq, &work->work);
	}

	return 0;
}

static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
			       void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	int ret = NOTIFY_DONE;

	if (event != NETDEV_BONDING_FAILOVER)
		return NOTIFY_DONE;

	if (!netif_is_bond_master(ndev))
		return NOTIFY_DONE;

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list)
		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
			ret = cma_netdev_change(ndev, id_priv);
			if (ret)
				goto out;
		}

out:
	mutex_unlock(&lock);
	return ret;
}

static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};
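
/*
 * Deliver RDMA_CM_EVENT_DEVICE_REMOVAL to one cm_id and drop the reference
 * taken by cma_process_remove().  If the ULP's handler returns non-zero the
 * id is destroyed here; otherwise any operation pending for the old state
 * is cancelled.
 */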
static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
{
	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
	enum rdma_cm_state state;
	unsigned long flags;

	mutex_lock(&id_priv->handler_mutex);
	/* Record that we want to remove the device */
	spin_lock_irqsave(&id_priv->lock, flags);
	state = id_priv->state;
	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
		spin_unlock_irqrestore(&id_priv->lock, flags);
		mutex_unlock(&id_priv->handler_mutex);
		cma_id_put(id_priv);
		return;
	}
	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	if (cma_cm_event_handler(id_priv, &event)) {
		/*
		 * At this point the ULP promises it won't call
		 * rdma_destroy_id() concurrently
		 */
		cma_id_put(id_priv);
		mutex_unlock(&id_priv->handler_mutex);
		trace_cm_id_destroy(id_priv);
		_destroy_id(id_priv, state);
		return;
	}
	mutex_unlock(&id_priv->handler_mutex);

	/*
	 * If this races with destroy then the thread that first assigns state
	 * to a destroying does the cancel.
	 */
	cma_cancel_operation(id_priv, state);
	cma_id_put(id_priv);
}

static void cma_process_remove(struct cma_device *cma_dev)
{
	mutex_lock(&lock);
	while (!list_empty(&cma_dev->id_list)) {
		struct rdma_id_private *id_priv = list_first_entry(
			&cma_dev->id_list, struct rdma_id_private, list);

		list_del(&id_priv->listen_list);
		list_del_init(&id_priv->list);
		cma_id_get(id_priv);
		mutex_unlock(&lock);

		cma_send_device_removal_put(id_priv);

		mutex_lock(&lock);
	}
	mutex_unlock(&lock);

	cma_dev_put(cma_dev);
	wait_for_completion(&cma_dev->comp);
}
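
/*
 * ib_client add callback: allocate the per-device cma_device, choose a
 * default GID type and RoCE ToS for every port, and replicate all wildcard
 * listens onto the newly added device.
 */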
static int cma_add_one(struct ib_device *device)
{
	struct rdma_id_private *to_destroy;
	struct cma_device *cma_dev;
	struct rdma_id_private *id_priv;
	unsigned int i;
	unsigned long supported_gids = 0;
	int ret;

	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
	if (!cma_dev)
		return -ENOMEM;

	cma_dev->device = device;
	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_gid_type),
					    GFP_KERNEL);
	if (!cma_dev->default_gid_type) {
		ret = -ENOMEM;
		goto free_cma_dev;
	}

	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
					    sizeof(*cma_dev->default_roce_tos),
					    GFP_KERNEL);
	if (!cma_dev->default_roce_tos) {
		ret = -ENOMEM;
		goto free_gid_type;
	}

	rdma_for_each_port (device, i) {
		supported_gids = roce_gid_type_mask_support(device, i);
		WARN_ON(!supported_gids);
		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				CMA_PREFERRED_ROCE_GID_TYPE;
		else
			cma_dev->default_gid_type[i - rdma_start_port(device)] =
				find_first_bit(&supported_gids, BITS_PER_LONG);
		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
	}

	init_completion(&cma_dev->comp);
	refcount_set(&cma_dev->refcount, 1);
	INIT_LIST_HEAD(&cma_dev->id_list);
	ib_set_client_data(device, &cma_client, cma_dev);

	mutex_lock(&lock);
	list_add_tail(&cma_dev->list, &dev_list);
	list_for_each_entry(id_priv, &listen_any_list, list) {
		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
		if (ret)
			goto free_listen;
	}
	mutex_unlock(&lock);

	trace_cm_add_one(device);
	return 0;

free_listen:
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	/* cma_process_remove() will delete to_destroy */
	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
free_gid_type:
	kfree(cma_dev->default_gid_type);
free_cma_dev:
	kfree(cma_dev);
	return ret;
}
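
/*
 * ib_client remove callback: unlink the cma_device, report
 * RDMA_CM_EVENT_DEVICE_REMOVAL to every cm_id still bound to it, wait for
 * the last reference to drop and free the per-device state.
 */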
static void cma_remove_one(struct ib_device *device, void *client_data)
{
	struct cma_device *cma_dev = client_data;

	trace_cm_remove_one(device);

	mutex_lock(&lock);
	list_del(&cma_dev->list);
	mutex_unlock(&lock);

	cma_process_remove(cma_dev);
	kfree(cma_dev->default_roce_tos);
	kfree(cma_dev->default_gid_type);
	kfree(cma_dev);
}
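
/*
 * Per-network-namespace state: one xarray per RDMA port space (TCP, UDP,
 * IPoIB, IB) tracking the port numbers bound in that namespace.  The exit
 * path only checks that every binding has already been released.
 */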
static int cma_init_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	xa_init(&pernet->tcp_ps);
	xa_init(&pernet->udp_ps);
	xa_init(&pernet->ipoib_ps);
	xa_init(&pernet->ib_ps);

	return 0;
}

static void cma_exit_net(struct net *net)
{
	struct cma_pernet *pernet = cma_pernet(net);

	WARN_ON(!xa_empty(&pernet->tcp_ps));
	WARN_ON(!xa_empty(&pernet->udp_ps));
	WARN_ON(!xa_empty(&pernet->ipoib_ps));
	WARN_ON(!xa_empty(&pernet->ib_ps));
}

static struct pernet_operations cma_pernet_operations = {
	.init = cma_init_net,
	.exit = cma_exit_net,
	.id = &cma_pernet_id,
	.size = sizeof(struct cma_pernet),
};
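
/*
 * Module init: prime lockdep with the rtnl -> &lock ordering used on the
 * bonding-failover path, then register the ordered workqueue, per-net
 * subsystem, SA client, netdev notifier, ib_client and configfs.
 * cma_cleanup() unwinds the same steps in reverse order.
 */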
static int __init cma_init(void)
{
	int ret;

	/*
	 * There is a rare lock ordering dependency in cma_netdev_callback()
	 * that only happens when bonding is enabled. Teach lockdep that rtnl
	 * must never be nested under lock so it can find these without having
	 * to test with bonding.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		rtnl_lock();
		mutex_lock(&lock);
		mutex_unlock(&lock);
		rtnl_unlock();
	}

	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
	if (!cma_wq)
		return -ENOMEM;

	ret = register_pernet_subsys(&cma_pernet_operations);
	if (ret)
		goto err_wq;

	ib_sa_register_client(&sa_client);
	register_netdevice_notifier(&cma_nb);

	ret = ib_register_client(&cma_client);
	if (ret)
		goto err;

	ret = cma_configfs_init();
	if (ret)
		goto err_ib;

	return 0;

err_ib:
	ib_unregister_client(&cma_client);
err:
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
err_wq:
	destroy_workqueue(cma_wq);
	return ret;
}

static void __exit cma_cleanup(void)
{
	cma_configfs_exit();
	ib_unregister_client(&cma_client);
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);
	unregister_pernet_subsys(&cma_pernet_operations);
	destroy_workqueue(cma_wq);
}

module_init(cma_init);
module_exit(cma_cleanup);