cm.c

  1. // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
  2. /*
  3. * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
  4. * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  5. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
  6. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  7. * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
  8. */
  9. #include <linux/completion.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/device.h>
  12. #include <linux/module.h>
  13. #include <linux/err.h>
  14. #include <linux/idr.h>
  15. #include <linux/interrupt.h>
  16. #include <linux/random.h>
  17. #include <linux/rbtree.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/slab.h>
  20. #include <linux/sysfs.h>
  21. #include <linux/workqueue.h>
  22. #include <linux/kdev_t.h>
  23. #include <linux/etherdevice.h>
  24. #include <rdma/ib_cache.h>
  25. #include <rdma/ib_cm.h>
  26. #include "cm_msgs.h"
  27. #include "core_priv.h"
  28. #include "cm_trace.h"
  29. MODULE_AUTHOR("Sean Hefty");
  30. MODULE_DESCRIPTION("InfiniBand CM");
  31. MODULE_LICENSE("Dual BSD/GPL");
  32. static const char * const ibcm_rej_reason_strs[] = {
  33. [IB_CM_REJ_NO_QP] = "no QP",
  34. [IB_CM_REJ_NO_EEC] = "no EEC",
  35. [IB_CM_REJ_NO_RESOURCES] = "no resources",
  36. [IB_CM_REJ_TIMEOUT] = "timeout",
  37. [IB_CM_REJ_UNSUPPORTED] = "unsupported",
  38. [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
  39. [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
  40. [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
  41. [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
  42. [IB_CM_REJ_STALE_CONN] = "stale conn",
  43. [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
  44. [IB_CM_REJ_INVALID_GID] = "invalid GID",
  45. [IB_CM_REJ_INVALID_LID] = "invalid LID",
  46. [IB_CM_REJ_INVALID_SL] = "invalid SL",
  47. [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
  48. [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
  49. [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
  50. [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
  51. [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
  52. [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
  53. [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
  54. [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
  55. [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
  56. [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
  57. [IB_CM_REJ_PORT_REDIRECT] = "port redirect",
  58. [IB_CM_REJ_INVALID_MTU] = "invalid MTU",
  59. [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
  60. [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
  61. [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
  62. [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
  63. [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
  64. [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
  65. [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
  66. [IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
  67. "vendor option is not supported",
  68. };
  69. const char *__attribute_const__ ibcm_reject_msg(int reason)
  70. {
  71. size_t index = reason;
  72. if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
  73. ibcm_rej_reason_strs[index])
  74. return ibcm_rej_reason_strs[index];
  75. else
  76. return "unrecognized reason";
  77. }
  78. EXPORT_SYMBOL(ibcm_reject_msg);
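/*
 * Illustrative sketch only (not part of the upstream file): a consumer
 * handling an IB_CM_REJ_RECEIVED event could log the textual reason like
 * this, where "event" is the hypothetical ib_cm_event passed to its
 * cm_handler:
 *
 *	pr_debug("connection rejected: %s (%d)\n",
 *		 ibcm_reject_msg(event->param.rej_rcvd.reason),
 *		 event->param.rej_rcvd.reason);
 */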
  79. struct cm_id_private;
  80. struct cm_work;
  81. static int cm_add_one(struct ib_device *device);
  82. static void cm_remove_one(struct ib_device *device, void *client_data);
  83. static void cm_process_work(struct cm_id_private *cm_id_priv,
  84. struct cm_work *work);
  85. static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
  86. struct ib_cm_sidr_rep_param *param);
  87. static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
  88. const void *private_data, u8 private_data_len);
  89. static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
  90. void *private_data, u8 private_data_len);
  91. static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
  92. enum ib_cm_rej_reason reason, void *ari,
  93. u8 ari_length, const void *private_data,
  94. u8 private_data_len);
  95. static struct ib_client cm_client = {
  96. .name = "cm",
  97. .add = cm_add_one,
  98. .remove = cm_remove_one
  99. };
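/*
 * All CM state lives in this single global instance: the per-device entries
 * on device_list, the listen/timewait lookup tables, and the xarray that
 * maps local IDs to cm_id_private structures.
 */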
  100. static struct ib_cm {
  101. spinlock_t lock;
  102. struct list_head device_list;
  103. rwlock_t device_lock;
  104. struct rb_root listen_service_table;
  105. u64 listen_service_id;
  106. /* struct rb_root peer_service_table; todo: fix peer to peer */
  107. struct rb_root remote_qp_table;
  108. struct rb_root remote_id_table;
  109. struct rb_root remote_sidr_table;
  110. struct xarray local_id_table;
  111. u32 local_id_next;
  112. __be32 random_id_operand;
  113. struct list_head timewait_list;
  114. struct workqueue_struct *wq;
115. /* Synchronize with CM port state changes */
  116. spinlock_t state_lock;
  117. } cm;
  118. /* Counter indexes ordered by attribute ID */
  119. enum {
  120. CM_REQ_COUNTER,
  121. CM_MRA_COUNTER,
  122. CM_REJ_COUNTER,
  123. CM_REP_COUNTER,
  124. CM_RTU_COUNTER,
  125. CM_DREQ_COUNTER,
  126. CM_DREP_COUNTER,
  127. CM_SIDR_REQ_COUNTER,
  128. CM_SIDR_REP_COUNTER,
  129. CM_LAP_COUNTER,
  130. CM_APR_COUNTER,
  131. CM_ATTR_COUNT,
  132. CM_ATTR_ID_OFFSET = 0x0010,
  133. };
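/*
 * The counter indexes above follow the CM MAD attribute IDs, which start at
 * CM_ATTR_ID_OFFSET (0x0010, the REQ attribute); adding the offset to a
 * counter index, or subtracting it from an attribute ID, converts between
 * the two numbering schemes (assuming the standard IB CM attribute IDs).
 */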
  134. enum {
  135. CM_XMIT,
  136. CM_XMIT_RETRIES,
  137. CM_RECV,
  138. CM_RECV_DUPLICATES,
  139. CM_COUNTER_GROUPS
  140. };
  141. static char const counter_group_names[CM_COUNTER_GROUPS]
  142. [sizeof("cm_rx_duplicates")] = {
  143. "cm_tx_msgs", "cm_tx_retries",
  144. "cm_rx_msgs", "cm_rx_duplicates"
  145. };
  146. struct cm_counter_group {
  147. struct kobject obj;
  148. atomic_long_t counter[CM_ATTR_COUNT];
  149. };
  150. struct cm_counter_attribute {
  151. struct attribute attr;
  152. int index;
  153. };
  154. #define CM_COUNTER_ATTR(_name, _index) \
  155. struct cm_counter_attribute cm_##_name##_counter_attr = { \
  156. .attr = { .name = __stringify(_name), .mode = 0444 }, \
  157. .index = _index \
  158. }
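/*
 * For example, CM_COUNTER_ATTR(req, CM_REQ_COUNTER) below defines
 * cm_req_counter_attr: a read-only (0444) sysfs attribute named "req" whose
 * ->index selects the CM_REQ_COUNTER slot of a group's counter[] array.
 */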
  159. static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
  160. static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
  161. static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
  162. static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
  163. static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
  164. static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
  165. static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
  166. static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
  167. static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
  168. static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
  169. static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
  170. static struct attribute *cm_counter_default_attrs[] = {
  171. &cm_req_counter_attr.attr,
  172. &cm_mra_counter_attr.attr,
  173. &cm_rej_counter_attr.attr,
  174. &cm_rep_counter_attr.attr,
  175. &cm_rtu_counter_attr.attr,
  176. &cm_dreq_counter_attr.attr,
  177. &cm_drep_counter_attr.attr,
  178. &cm_sidr_req_counter_attr.attr,
  179. &cm_sidr_rep_counter_attr.attr,
  180. &cm_lap_counter_attr.attr,
  181. &cm_apr_counter_attr.attr,
  182. NULL
  183. };
  184. struct cm_port {
  185. struct cm_device *cm_dev;
  186. struct ib_mad_agent *mad_agent;
  187. u8 port_num;
  188. struct list_head cm_priv_prim_list;
  189. struct list_head cm_priv_altr_list;
  190. struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
  191. };
  192. struct cm_device {
  193. struct list_head list;
  194. struct ib_device *ib_device;
  195. u8 ack_delay;
  196. int going_down;
  197. struct cm_port *port[];
  198. };
  199. struct cm_av {
  200. struct cm_port *port;
  201. union ib_gid dgid;
  202. struct rdma_ah_attr ah_attr;
  203. u16 pkey_index;
  204. u8 timeout;
  205. };
  206. struct cm_work {
  207. struct delayed_work work;
  208. struct list_head list;
  209. struct cm_port *port;
  210. struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
  211. __be32 local_id; /* Established / timewait */
  212. __be32 remote_id;
  213. struct ib_cm_event cm_event;
  214. struct sa_path_rec path[];
  215. };
  216. struct cm_timewait_info {
  217. struct cm_work work;
  218. struct list_head list;
  219. struct rb_node remote_qp_node;
  220. struct rb_node remote_id_node;
  221. __be64 remote_ca_guid;
  222. __be32 remote_qpn;
  223. u8 inserted_remote_qp;
  224. u8 inserted_remote_id;
  225. };
  226. struct cm_id_private {
  227. struct ib_cm_id id;
  228. struct rb_node service_node;
  229. struct rb_node sidr_id_node;
  230. spinlock_t lock; /* Do not acquire inside cm.lock */
  231. struct completion comp;
  232. refcount_t refcount;
  233. /* Number of clients sharing this ib_cm_id. Only valid for listeners.
  234. * Protected by the cm.lock spinlock. */
  235. int listen_sharecount;
  236. struct rcu_head rcu;
  237. struct ib_mad_send_buf *msg;
  238. struct cm_timewait_info *timewait_info;
  239. /* todo: use alternate port on send failure */
  240. struct cm_av av;
  241. struct cm_av alt_av;
  242. void *private_data;
  243. __be64 tid;
  244. __be32 local_qpn;
  245. __be32 remote_qpn;
  246. enum ib_qp_type qp_type;
  247. __be32 sq_psn;
  248. __be32 rq_psn;
  249. int timeout_ms;
  250. enum ib_mtu path_mtu;
  251. __be16 pkey;
  252. u8 private_data_len;
  253. u8 max_cm_retries;
  254. u8 responder_resources;
  255. u8 initiator_depth;
  256. u8 retry_count;
  257. u8 rnr_retry_count;
  258. u8 service_timeout;
  259. u8 target_ack_delay;
  260. struct list_head prim_list;
  261. struct list_head altr_list;
262. /* Indicates that the send port's MAD agent is registered and the av is set */
  263. int prim_send_port_not_ready;
  264. int altr_send_port_not_ready;
  265. struct list_head work_list;
  266. atomic_t work_count;
  267. struct rdma_ucm_ece ece;
  268. };
  269. static void cm_work_handler(struct work_struct *work);
  270. static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
  271. {
  272. if (refcount_dec_and_test(&cm_id_priv->refcount))
  273. complete(&cm_id_priv->comp);
  274. }
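/*
 * Allocate a MAD send buffer for an outgoing CM message using the primary
 * address vector, or the alternate one if the primary port is not ready.
 * Takes a reference on cm_id_priv that is dropped again in cm_free_msg().
 */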
  275. static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
  276. struct ib_mad_send_buf **msg)
  277. {
  278. struct ib_mad_agent *mad_agent;
  279. struct ib_mad_send_buf *m;
  280. struct ib_ah *ah;
  281. struct cm_av *av;
  282. unsigned long flags, flags2;
  283. int ret = 0;
284. /* don't let the port be released until the agent is down */
  285. spin_lock_irqsave(&cm.state_lock, flags2);
  286. spin_lock_irqsave(&cm.lock, flags);
  287. if (!cm_id_priv->prim_send_port_not_ready)
  288. av = &cm_id_priv->av;
  289. else if (!cm_id_priv->altr_send_port_not_ready &&
  290. (cm_id_priv->alt_av.port))
  291. av = &cm_id_priv->alt_av;
  292. else {
  293. pr_info("%s: not valid CM id\n", __func__);
  294. ret = -ENODEV;
  295. spin_unlock_irqrestore(&cm.lock, flags);
  296. goto out;
  297. }
  298. spin_unlock_irqrestore(&cm.lock, flags);
299. /* Make sure the port hasn't released the MAD agent yet */
  300. mad_agent = cm_id_priv->av.port->mad_agent;
  301. if (!mad_agent) {
  302. pr_info("%s: not a valid MAD agent\n", __func__);
  303. ret = -ENODEV;
  304. goto out;
  305. }
  306. ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
  307. if (IS_ERR(ah)) {
  308. ret = PTR_ERR(ah);
  309. goto out;
  310. }
  311. m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
  312. av->pkey_index,
  313. 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
  314. GFP_ATOMIC,
  315. IB_MGMT_BASE_VERSION);
  316. if (IS_ERR(m)) {
  317. rdma_destroy_ah(ah, 0);
  318. ret = PTR_ERR(m);
  319. goto out;
  320. }
  321. /* Timeout set by caller if response is expected. */
  322. m->ah = ah;
  323. m->retries = cm_id_priv->max_cm_retries;
  324. refcount_inc(&cm_id_priv->refcount);
  325. m->context[0] = cm_id_priv;
  326. *msg = m;
  327. out:
  328. spin_unlock_irqrestore(&cm.state_lock, flags2);
  329. return ret;
  330. }
  331. static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
  332. struct ib_mad_recv_wc *mad_recv_wc)
  333. {
  334. return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
  335. 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
  336. GFP_ATOMIC,
  337. IB_MGMT_BASE_VERSION);
  338. }
  339. static int cm_create_response_msg_ah(struct cm_port *port,
  340. struct ib_mad_recv_wc *mad_recv_wc,
  341. struct ib_mad_send_buf *msg)
  342. {
  343. struct ib_ah *ah;
  344. ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
  345. mad_recv_wc->recv_buf.grh, port->port_num);
  346. if (IS_ERR(ah))
  347. return PTR_ERR(ah);
  348. msg->ah = ah;
  349. return 0;
  350. }
  351. static void cm_free_msg(struct ib_mad_send_buf *msg)
  352. {
  353. if (msg->ah)
  354. rdma_destroy_ah(msg->ah, 0);
  355. if (msg->context[0])
  356. cm_deref_id(msg->context[0]);
  357. ib_free_send_mad(msg);
  358. }
  359. static int cm_alloc_response_msg(struct cm_port *port,
  360. struct ib_mad_recv_wc *mad_recv_wc,
  361. struct ib_mad_send_buf **msg)
  362. {
  363. struct ib_mad_send_buf *m;
  364. int ret;
  365. m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
  366. if (IS_ERR(m))
  367. return PTR_ERR(m);
  368. ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
  369. if (ret) {
  370. cm_free_msg(m);
  371. return ret;
  372. }
  373. *msg = m;
  374. return 0;
  375. }
  376. static void * cm_copy_private_data(const void *private_data,
  377. u8 private_data_len)
  378. {
  379. void *data;
  380. if (!private_data || !private_data_len)
  381. return NULL;
  382. data = kmemdup(private_data, private_data_len, GFP_KERNEL);
  383. if (!data)
  384. return ERR_PTR(-ENOMEM);
  385. return data;
  386. }
  387. static void cm_set_private_data(struct cm_id_private *cm_id_priv,
  388. void *private_data, u8 private_data_len)
  389. {
  390. if (cm_id_priv->private_data && cm_id_priv->private_data_len)
  391. kfree(cm_id_priv->private_data);
  392. cm_id_priv->private_data = private_data;
  393. cm_id_priv->private_data_len = private_data_len;
  394. }
  395. static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
  396. struct ib_grh *grh, struct cm_av *av)
  397. {
  398. struct rdma_ah_attr new_ah_attr;
  399. int ret;
  400. av->port = port;
  401. av->pkey_index = wc->pkey_index;
  402. /*
403. * av->ah_attr might already be initialized based on a past wc during an
404. * incoming connect request or while sending out a connect request, so
405. * initialize a new ah_attr on the stack. If initialization fails, the old
406. * ah_attr is still used for sending any responses. If initialization
407. * succeeds, the new ah_attr replaces the old one.
  408. */
  409. ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
  410. port->port_num, wc,
  411. grh, &new_ah_attr);
  412. if (ret)
  413. return ret;
  414. rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
  415. return 0;
  416. }
  417. static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
  418. struct ib_grh *grh, struct cm_av *av)
  419. {
  420. av->port = port;
  421. av->pkey_index = wc->pkey_index;
  422. return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
  423. port->port_num, wc,
  424. grh, &av->ah_attr);
  425. }
  426. static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
  427. struct cm_av *av, struct cm_port *port)
  428. {
  429. unsigned long flags;
  430. spin_lock_irqsave(&cm.lock, flags);
  431. if (&cm_id_priv->av == av)
  432. list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
  433. else if (&cm_id_priv->alt_av == av)
  434. list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
  435. else
  436. WARN_ON(true);
  437. spin_unlock_irqrestore(&cm.lock, flags);
  438. }
  439. static struct cm_port *
  440. get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
  441. {
  442. struct cm_device *cm_dev;
  443. struct cm_port *port = NULL;
  444. unsigned long flags;
  445. if (attr) {
  446. read_lock_irqsave(&cm.device_lock, flags);
  447. list_for_each_entry(cm_dev, &cm.device_list, list) {
  448. if (cm_dev->ib_device == attr->device) {
  449. port = cm_dev->port[attr->port_num - 1];
  450. break;
  451. }
  452. }
  453. read_unlock_irqrestore(&cm.device_lock, flags);
  454. } else {
455. /* The SGID attribute can be NULL in the following
456. * conditions:
  457. * (a) Alternative path
  458. * (b) IB link layer without GRH
  459. * (c) LAP send messages
  460. */
  461. read_lock_irqsave(&cm.device_lock, flags);
  462. list_for_each_entry(cm_dev, &cm.device_list, list) {
  463. attr = rdma_find_gid(cm_dev->ib_device,
  464. &path->sgid,
  465. sa_conv_pathrec_to_gid_type(path),
  466. NULL);
  467. if (!IS_ERR(attr)) {
  468. port = cm_dev->port[attr->port_num - 1];
  469. break;
  470. }
  471. }
  472. read_unlock_irqrestore(&cm.device_lock, flags);
  473. if (port)
  474. rdma_put_gid_attr(attr);
  475. }
  476. return port;
  477. }
  478. static int cm_init_av_by_path(struct sa_path_rec *path,
  479. const struct ib_gid_attr *sgid_attr,
  480. struct cm_av *av,
  481. struct cm_id_private *cm_id_priv)
  482. {
  483. struct rdma_ah_attr new_ah_attr;
  484. struct cm_device *cm_dev;
  485. struct cm_port *port;
  486. int ret;
  487. port = get_cm_port_from_path(path, sgid_attr);
  488. if (!port)
  489. return -EINVAL;
  490. cm_dev = port->cm_dev;
  491. ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
  492. be16_to_cpu(path->pkey), &av->pkey_index);
  493. if (ret)
  494. return ret;
  495. av->port = port;
  496. /*
497. * av->ah_attr might already be initialized from a wc or during request
498. * processing, and in that case it may hold a reference to sgid_attr, so
499. * initialize a new ah_attr on the stack.
500. * If initialization fails, the old ah_attr is used for sending any
501. * responses. If initialization succeeds, the new ah_attr replaces the
502. * old one, so the right ah_attr can be used to return an error
503. * response.
  504. */
  505. ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
  506. &new_ah_attr, sgid_attr);
  507. if (ret)
  508. return ret;
  509. av->timeout = path->packet_life_time + 1;
  510. add_cm_id_to_port_list(cm_id_priv, av, port);
  511. rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
  512. return 0;
  513. }
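/*
 * local_id values handed out to users are the xarray index XOR'ed with a
 * random operand (see cm_alloc_id_priv()); cm_local_id() recovers the index
 * so the entry can be looked up in cm.local_id_table.
 */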
  514. static u32 cm_local_id(__be32 local_id)
  515. {
  516. return (__force u32) (local_id ^ cm.random_id_operand);
  517. }
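/*
 * Look up the cm_id_private for a local/remote ID pair and take a reference
 * on it. Returns NULL if there is no match or the entry is already being
 * torn down (its refcount has dropped to zero).
 */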
  518. static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
  519. {
  520. struct cm_id_private *cm_id_priv;
  521. rcu_read_lock();
  522. cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
  523. if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
  524. !refcount_inc_not_zero(&cm_id_priv->refcount))
  525. cm_id_priv = NULL;
  526. rcu_read_unlock();
  527. return cm_id_priv;
  528. }
  529. /*
  530. * Trivial helpers to strip endian annotation and compare; the
  531. * endianness doesn't actually matter since we just need a stable
  532. * order for the RB tree.
  533. */
  534. static int be32_lt(__be32 a, __be32 b)
  535. {
  536. return (__force u32) a < (__force u32) b;
  537. }
  538. static int be32_gt(__be32 a, __be32 b)
  539. {
  540. return (__force u32) a > (__force u32) b;
  541. }
  542. static int be64_lt(__be64 a, __be64 b)
  543. {
  544. return (__force u64) a < (__force u64) b;
  545. }
  546. static int be64_gt(__be64 a, __be64 b)
  547. {
  548. return (__force u64) a > (__force u64) b;
  549. }
  550. /*
  551. * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
  552. * if the new ID was inserted, NULL if it could not be inserted due to a
  553. * collision, or the existing cm_id_priv ready for shared usage.
  554. */
  555. static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
  556. ib_cm_handler shared_handler)
  557. {
  558. struct rb_node **link = &cm.listen_service_table.rb_node;
  559. struct rb_node *parent = NULL;
  560. struct cm_id_private *cur_cm_id_priv;
  561. __be64 service_id = cm_id_priv->id.service_id;
  562. __be64 service_mask = cm_id_priv->id.service_mask;
  563. unsigned long flags;
  564. spin_lock_irqsave(&cm.lock, flags);
  565. while (*link) {
  566. parent = *link;
  567. cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
  568. service_node);
  569. if ((cur_cm_id_priv->id.service_mask & service_id) ==
  570. (service_mask & cur_cm_id_priv->id.service_id) &&
  571. (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
  572. /*
  573. * Sharing an ib_cm_id with different handlers is not
  574. * supported
  575. */
  576. if (cur_cm_id_priv->id.cm_handler != shared_handler ||
  577. cur_cm_id_priv->id.context ||
  578. WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
  579. spin_unlock_irqrestore(&cm.lock, flags);
  580. return NULL;
  581. }
  582. refcount_inc(&cur_cm_id_priv->refcount);
  583. cur_cm_id_priv->listen_sharecount++;
  584. spin_unlock_irqrestore(&cm.lock, flags);
  585. return cur_cm_id_priv;
  586. }
  587. if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
  588. link = &(*link)->rb_left;
  589. else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
  590. link = &(*link)->rb_right;
  591. else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
  592. link = &(*link)->rb_left;
  593. else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
  594. link = &(*link)->rb_right;
  595. else
  596. link = &(*link)->rb_right;
  597. }
  598. cm_id_priv->listen_sharecount++;
  599. rb_link_node(&cm_id_priv->service_node, parent, link);
  600. rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
  601. spin_unlock_irqrestore(&cm.lock, flags);
  602. return cm_id_priv;
  603. }
  604. static struct cm_id_private * cm_find_listen(struct ib_device *device,
  605. __be64 service_id)
  606. {
  607. struct rb_node *node = cm.listen_service_table.rb_node;
  608. struct cm_id_private *cm_id_priv;
  609. while (node) {
  610. cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
  611. if ((cm_id_priv->id.service_mask & service_id) ==
  612. cm_id_priv->id.service_id &&
  613. (cm_id_priv->id.device == device)) {
  614. refcount_inc(&cm_id_priv->refcount);
  615. return cm_id_priv;
  616. }
  617. if (device < cm_id_priv->id.device)
  618. node = node->rb_left;
  619. else if (device > cm_id_priv->id.device)
  620. node = node->rb_right;
  621. else if (be64_lt(service_id, cm_id_priv->id.service_id))
  622. node = node->rb_left;
  623. else if (be64_gt(service_id, cm_id_priv->id.service_id))
  624. node = node->rb_right;
  625. else
  626. node = node->rb_right;
  627. }
  628. return NULL;
  629. }
  630. static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
  631. *timewait_info)
  632. {
  633. struct rb_node **link = &cm.remote_id_table.rb_node;
  634. struct rb_node *parent = NULL;
  635. struct cm_timewait_info *cur_timewait_info;
  636. __be64 remote_ca_guid = timewait_info->remote_ca_guid;
  637. __be32 remote_id = timewait_info->work.remote_id;
  638. while (*link) {
  639. parent = *link;
  640. cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
  641. remote_id_node);
  642. if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
  643. link = &(*link)->rb_left;
  644. else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
  645. link = &(*link)->rb_right;
  646. else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
  647. link = &(*link)->rb_left;
  648. else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
  649. link = &(*link)->rb_right;
  650. else
  651. return cur_timewait_info;
  652. }
  653. timewait_info->inserted_remote_id = 1;
  654. rb_link_node(&timewait_info->remote_id_node, parent, link);
  655. rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
  656. return NULL;
  657. }
  658. static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
  659. __be32 remote_id)
  660. {
  661. struct rb_node *node = cm.remote_id_table.rb_node;
  662. struct cm_timewait_info *timewait_info;
  663. struct cm_id_private *res = NULL;
  664. spin_lock_irq(&cm.lock);
  665. while (node) {
  666. timewait_info = rb_entry(node, struct cm_timewait_info,
  667. remote_id_node);
  668. if (be32_lt(remote_id, timewait_info->work.remote_id))
  669. node = node->rb_left;
  670. else if (be32_gt(remote_id, timewait_info->work.remote_id))
  671. node = node->rb_right;
  672. else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
  673. node = node->rb_left;
  674. else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
  675. node = node->rb_right;
  676. else {
  677. res = cm_acquire_id(timewait_info->work.local_id,
  678. timewait_info->work.remote_id);
  679. break;
  680. }
  681. }
  682. spin_unlock_irq(&cm.lock);
  683. return res;
  684. }
  685. static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
  686. *timewait_info)
  687. {
  688. struct rb_node **link = &cm.remote_qp_table.rb_node;
  689. struct rb_node *parent = NULL;
  690. struct cm_timewait_info *cur_timewait_info;
  691. __be64 remote_ca_guid = timewait_info->remote_ca_guid;
  692. __be32 remote_qpn = timewait_info->remote_qpn;
  693. while (*link) {
  694. parent = *link;
  695. cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
  696. remote_qp_node);
  697. if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
  698. link = &(*link)->rb_left;
  699. else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
  700. link = &(*link)->rb_right;
  701. else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
  702. link = &(*link)->rb_left;
  703. else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
  704. link = &(*link)->rb_right;
  705. else
  706. return cur_timewait_info;
  707. }
  708. timewait_info->inserted_remote_qp = 1;
  709. rb_link_node(&timewait_info->remote_qp_node, parent, link);
  710. rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
  711. return NULL;
  712. }
  713. static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
  714. *cm_id_priv)
  715. {
  716. struct rb_node **link = &cm.remote_sidr_table.rb_node;
  717. struct rb_node *parent = NULL;
  718. struct cm_id_private *cur_cm_id_priv;
  719. union ib_gid *port_gid = &cm_id_priv->av.dgid;
  720. __be32 remote_id = cm_id_priv->id.remote_id;
  721. while (*link) {
  722. parent = *link;
  723. cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
  724. sidr_id_node);
  725. if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
  726. link = &(*link)->rb_left;
  727. else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
  728. link = &(*link)->rb_right;
  729. else {
  730. int cmp;
  731. cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
  732. sizeof *port_gid);
  733. if (cmp < 0)
  734. link = &(*link)->rb_left;
  735. else if (cmp > 0)
  736. link = &(*link)->rb_right;
  737. else
  738. return cur_cm_id_priv;
  739. }
  740. }
  741. rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
  742. rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
  743. return NULL;
  744. }
  745. static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
  746. ib_cm_handler cm_handler,
  747. void *context)
  748. {
  749. struct cm_id_private *cm_id_priv;
  750. u32 id;
  751. int ret;
  752. cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
  753. if (!cm_id_priv)
  754. return ERR_PTR(-ENOMEM);
  755. cm_id_priv->id.state = IB_CM_IDLE;
  756. cm_id_priv->id.device = device;
  757. cm_id_priv->id.cm_handler = cm_handler;
  758. cm_id_priv->id.context = context;
  759. cm_id_priv->id.remote_cm_qpn = 1;
  760. RB_CLEAR_NODE(&cm_id_priv->service_node);
  761. RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
  762. spin_lock_init(&cm_id_priv->lock);
  763. init_completion(&cm_id_priv->comp);
  764. INIT_LIST_HEAD(&cm_id_priv->work_list);
  765. INIT_LIST_HEAD(&cm_id_priv->prim_list);
  766. INIT_LIST_HEAD(&cm_id_priv->altr_list);
  767. atomic_set(&cm_id_priv->work_count, -1);
  768. refcount_set(&cm_id_priv->refcount, 1);
  769. ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
  770. &cm.local_id_next, GFP_KERNEL);
  771. if (ret < 0)
  772. goto error;
  773. cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
  774. return cm_id_priv;
  775. error:
  776. kfree(cm_id_priv);
  777. return ERR_PTR(ret);
  778. }
  779. /*
  780. * Make the ID visible to the MAD handlers and other threads that use the
  781. * xarray.
  782. */
  783. static void cm_finalize_id(struct cm_id_private *cm_id_priv)
  784. {
  785. xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
  786. cm_id_priv, GFP_ATOMIC);
  787. }
  788. struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
  789. ib_cm_handler cm_handler,
  790. void *context)
  791. {
  792. struct cm_id_private *cm_id_priv;
  793. cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
  794. if (IS_ERR(cm_id_priv))
  795. return ERR_CAST(cm_id_priv);
  796. cm_finalize_id(cm_id_priv);
  797. return &cm_id_priv->id;
  798. }
  799. EXPORT_SYMBOL(ib_create_cm_id);
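/*
 * A minimal usage sketch (the device, handler and context names are
 * hypothetical, not part of this file):
 *
 *	struct ib_cm_id *cm_id;
 *
 *	cm_id = ib_create_cm_id(my_ib_device, my_cm_handler, my_context);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 *	...
 *	ib_destroy_cm_id(cm_id);
 */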
  800. static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
  801. {
  802. struct cm_work *work;
  803. if (list_empty(&cm_id_priv->work_list))
  804. return NULL;
  805. work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
  806. list_del(&work->list);
  807. return work;
  808. }
  809. static void cm_free_work(struct cm_work *work)
  810. {
  811. if (work->mad_recv_wc)
  812. ib_free_recv_mad(work->mad_recv_wc);
  813. kfree(work);
  814. }
  815. static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
  816. struct cm_work *work)
  817. __releases(&cm_id_priv->lock)
  818. {
  819. bool immediate;
  820. /*
821. * To deliver the event to the user callback we have to drop the
822. * spinlock; however, we need to ensure that the user callback is single
823. * threaded and receives events in temporal order. If there are
824. * already events being processed, queue new events onto a list and
825. * the thread currently processing will pick them up.
  826. */
  827. immediate = atomic_inc_and_test(&cm_id_priv->work_count);
  828. if (!immediate) {
  829. list_add_tail(&work->list, &cm_id_priv->work_list);
  830. /*
831. * This routine always consumes the incoming reference. Once the work is
832. * queued to the work_list, a reference is held by the thread
833. * currently running cm_process_work() and this reference is not
  834. * needed.
  835. */
  836. cm_deref_id(cm_id_priv);
  837. }
  838. spin_unlock_irq(&cm_id_priv->lock);
  839. if (immediate)
  840. cm_process_work(cm_id_priv, work);
  841. }
  842. static inline int cm_convert_to_ms(int iba_time)
  843. {
  844. /* approximate conversion to ms from 4.096us x 2^iba_time */
  845. return 1 << max(iba_time - 8, 0);
  846. }
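/*
 * For example, an IBA time value of 20 encodes 4.096us * 2^20 (about 4.3 s);
 * the helper above approximates that as 1 << (20 - 8) = 4096 ms.
 */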
  847. /*
  848. * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
  849. * Because of how ack_timeout is stored, adding one doubles the timeout.
  850. * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
  851. * increment it (round up) only if the other is within 50%.
  852. */
  853. static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
  854. {
  855. int ack_timeout = packet_life_time + 1;
  856. if (ack_timeout >= ca_ack_delay)
  857. ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
  858. else
  859. ack_timeout = ca_ack_delay +
  860. (ack_timeout >= (ca_ack_delay - 1));
  861. return min(31, ack_timeout);
  862. }
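/*
 * For example, with ca_ack_delay = 14 and packet_life_time = 14: ack_timeout
 * starts at 15 and, since ca_ack_delay >= ack_timeout - 1, is rounded up to
 * 16 (i.e. 4.096us * 2^16, roughly 268 ms).
 */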
  863. static void cm_remove_remote(struct cm_id_private *cm_id_priv)
  864. {
  865. struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
  866. if (timewait_info->inserted_remote_id) {
  867. rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
  868. timewait_info->inserted_remote_id = 0;
  869. }
  870. if (timewait_info->inserted_remote_qp) {
  871. rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
  872. timewait_info->inserted_remote_qp = 0;
  873. }
  874. }
  875. static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
  876. {
  877. struct cm_timewait_info *timewait_info;
  878. timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
  879. if (!timewait_info)
  880. return ERR_PTR(-ENOMEM);
  881. timewait_info->work.local_id = local_id;
  882. INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
  883. timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
  884. return timewait_info;
  885. }
  886. static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
  887. {
  888. int wait_time;
  889. unsigned long flags;
  890. struct cm_device *cm_dev;
  891. lockdep_assert_held(&cm_id_priv->lock);
  892. cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
  893. if (!cm_dev)
  894. return;
  895. spin_lock_irqsave(&cm.lock, flags);
  896. cm_remove_remote(cm_id_priv);
  897. list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
  898. spin_unlock_irqrestore(&cm.lock, flags);
  899. /*
  900. * The cm_id could be destroyed by the user before we exit timewait.
  901. * To protect against this, we search for the cm_id after exiting
  902. * timewait before notifying the user that we've exited timewait.
  903. */
  904. cm_id_priv->id.state = IB_CM_TIMEWAIT;
  905. wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
  906. /* Check if the device started its remove_one */
  907. spin_lock_irqsave(&cm.lock, flags);
  908. if (!cm_dev->going_down)
  909. queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
  910. msecs_to_jiffies(wait_time));
  911. spin_unlock_irqrestore(&cm.lock, flags);
  912. /*
  913. * The timewait_info is converted into a work and gets freed during
  914. * cm_free_work() in cm_timewait_handler().
  915. */
  916. BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
  917. cm_id_priv->timewait_info = NULL;
  918. }
  919. static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
  920. {
  921. unsigned long flags;
  922. lockdep_assert_held(&cm_id_priv->lock);
  923. cm_id_priv->id.state = IB_CM_IDLE;
  924. if (cm_id_priv->timewait_info) {
  925. spin_lock_irqsave(&cm.lock, flags);
  926. cm_remove_remote(cm_id_priv);
  927. spin_unlock_irqrestore(&cm.lock, flags);
  928. kfree(cm_id_priv->timewait_info);
  929. cm_id_priv->timewait_info = NULL;
  930. }
  931. }
  932. static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
  933. {
  934. struct cm_id_private *cm_id_priv;
  935. struct cm_work *work;
  936. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  937. spin_lock_irq(&cm_id_priv->lock);
  938. retest:
  939. switch (cm_id->state) {
  940. case IB_CM_LISTEN:
  941. spin_lock(&cm.lock);
  942. if (--cm_id_priv->listen_sharecount > 0) {
  943. /* The id is still shared. */
  944. WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
  945. spin_unlock(&cm.lock);
  946. spin_unlock_irq(&cm_id_priv->lock);
  947. cm_deref_id(cm_id_priv);
  948. return;
  949. }
  950. cm_id->state = IB_CM_IDLE;
  951. rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
  952. RB_CLEAR_NODE(&cm_id_priv->service_node);
  953. spin_unlock(&cm.lock);
  954. break;
  955. case IB_CM_SIDR_REQ_SENT:
  956. cm_id->state = IB_CM_IDLE;
  957. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  958. break;
  959. case IB_CM_SIDR_REQ_RCVD:
  960. cm_send_sidr_rep_locked(cm_id_priv,
  961. &(struct ib_cm_sidr_rep_param){
  962. .status = IB_SIDR_REJECT });
  963. /* cm_send_sidr_rep_locked will not move to IDLE if it fails */
  964. cm_id->state = IB_CM_IDLE;
  965. break;
  966. case IB_CM_REQ_SENT:
  967. case IB_CM_MRA_REQ_RCVD:
  968. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  969. cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
  970. &cm_id_priv->id.device->node_guid,
  971. sizeof(cm_id_priv->id.device->node_guid),
  972. NULL, 0);
  973. break;
  974. case IB_CM_REQ_RCVD:
  975. if (err == -ENOMEM) {
976. /* Do not reject, so that future retries remain possible. */
  977. cm_reset_to_idle(cm_id_priv);
  978. } else {
  979. cm_send_rej_locked(cm_id_priv,
  980. IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
  981. NULL, 0);
  982. }
  983. break;
  984. case IB_CM_REP_SENT:
  985. case IB_CM_MRA_REP_RCVD:
  986. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  987. cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
  988. 0, NULL, 0);
  989. goto retest;
  990. case IB_CM_MRA_REQ_SENT:
  991. case IB_CM_REP_RCVD:
  992. case IB_CM_MRA_REP_SENT:
  993. cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
  994. 0, NULL, 0);
  995. break;
  996. case IB_CM_ESTABLISHED:
  997. if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
  998. cm_id->state = IB_CM_IDLE;
  999. break;
  1000. }
  1001. cm_send_dreq_locked(cm_id_priv, NULL, 0);
  1002. goto retest;
  1003. case IB_CM_DREQ_SENT:
  1004. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  1005. cm_enter_timewait(cm_id_priv);
  1006. goto retest;
  1007. case IB_CM_DREQ_RCVD:
  1008. cm_send_drep_locked(cm_id_priv, NULL, 0);
  1009. WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
  1010. goto retest;
  1011. case IB_CM_TIMEWAIT:
  1012. /*
  1013. * The cm_acquire_id in cm_timewait_handler will stop working
  1014. * once we do xa_erase below, so just move to idle here for
  1015. * consistency.
  1016. */
  1017. cm_id->state = IB_CM_IDLE;
  1018. break;
  1019. case IB_CM_IDLE:
  1020. break;
  1021. }
  1022. WARN_ON(cm_id->state != IB_CM_IDLE);
  1023. spin_lock(&cm.lock);
1024. /* Required for cleanup paths related to cm_req_handler() */
  1025. if (cm_id_priv->timewait_info) {
  1026. cm_remove_remote(cm_id_priv);
  1027. kfree(cm_id_priv->timewait_info);
  1028. cm_id_priv->timewait_info = NULL;
  1029. }
  1030. if (!list_empty(&cm_id_priv->altr_list) &&
  1031. (!cm_id_priv->altr_send_port_not_ready))
  1032. list_del(&cm_id_priv->altr_list);
  1033. if (!list_empty(&cm_id_priv->prim_list) &&
  1034. (!cm_id_priv->prim_send_port_not_ready))
  1035. list_del(&cm_id_priv->prim_list);
  1036. WARN_ON(cm_id_priv->listen_sharecount);
  1037. WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
  1038. if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
  1039. rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
  1040. spin_unlock(&cm.lock);
  1041. spin_unlock_irq(&cm_id_priv->lock);
  1042. xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
  1043. cm_deref_id(cm_id_priv);
  1044. wait_for_completion(&cm_id_priv->comp);
  1045. while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
  1046. cm_free_work(work);
  1047. rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
  1048. rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
  1049. kfree(cm_id_priv->private_data);
  1050. kfree_rcu(cm_id_priv, rcu);
  1051. }
  1052. void ib_destroy_cm_id(struct ib_cm_id *cm_id)
  1053. {
  1054. cm_destroy_id(cm_id, 0);
  1055. }
  1056. EXPORT_SYMBOL(ib_destroy_cm_id);
  1057. static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
  1058. __be64 service_mask)
  1059. {
  1060. service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
  1061. service_id &= service_mask;
  1062. if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
  1063. (service_id != IB_CM_ASSIGN_SERVICE_ID))
  1064. return -EINVAL;
  1065. if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
  1066. cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
  1067. cm_id_priv->id.service_mask = ~cpu_to_be64(0);
  1068. } else {
  1069. cm_id_priv->id.service_id = service_id;
  1070. cm_id_priv->id.service_mask = service_mask;
  1071. }
  1072. return 0;
  1073. }
  1074. /**
  1075. * ib_cm_listen - Initiates listening on the specified service ID for
1076. * connection and service ID resolution requests.
1077. * @cm_id: Connection identifier associated with the listen request.
1078. * @service_id: Service identifier matched against incoming connection
1079. * and service ID resolution requests. The service ID should be specified
1080. * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1081. * assign a service ID to the caller.
1082. * @service_mask: Mask applied to service ID used to listen across a
1083. * range of service IDs. If set to 0, the service ID is matched
1084. * exactly. This parameter is ignored if %service_id is set to
1085. * IB_CM_ASSIGN_SERVICE_ID.
  1086. */
  1087. int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
  1088. {
  1089. struct cm_id_private *cm_id_priv =
  1090. container_of(cm_id, struct cm_id_private, id);
  1091. unsigned long flags;
  1092. int ret;
  1093. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1094. if (cm_id_priv->id.state != IB_CM_IDLE) {
  1095. ret = -EINVAL;
  1096. goto out;
  1097. }
  1098. ret = cm_init_listen(cm_id_priv, service_id, service_mask);
  1099. if (ret)
  1100. goto out;
  1101. if (!cm_insert_listen(cm_id_priv, NULL)) {
  1102. ret = -EBUSY;
  1103. goto out;
  1104. }
  1105. cm_id_priv->id.state = IB_CM_LISTEN;
  1106. ret = 0;
  1107. out:
  1108. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1109. return ret;
  1110. }
  1111. EXPORT_SYMBOL(ib_cm_listen);
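/*
 * Usage sketch (editor's illustration, not part of the original file): a
 * ULP that accepts connections on a fixed service ID creates a cm_id and
 * listens on it. The handler name, context pointer and service ID value
 * below are assumptions made for the example.
 *
 *	static int my_cm_handler(struct ib_cm_id *id,
 *				 const struct ib_cm_event *event)
 *	{
 *		// Returning non-zero asks the CM to destroy the id; see
 *		// cm_process_work() below.
 *		return 0;
 *	}
 *	...
 *	id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *	if (!IS_ERR(id) && ib_cm_listen(id, cpu_to_be64(0x10ce0001ULL), 0))
 *		ib_destroy_cm_id(id);
 *
 * A service_mask of 0 (as above) requests an exact match on the service ID.
 */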
  1112. /**
1113. * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on the given service ID.
1114. *
1115. * If there's an existing ID listening on that same device and service ID,
1116. * return it.
1117. *
1118. * @device: Device associated with the cm_id. All related communication will
1119. * be associated with the specified device.
1120. * @cm_handler: Callback invoked to notify the user of CM events.
1121. * @service_id: Service identifier matched against incoming connection
1122. * and service ID resolution requests. The service ID should be specified
1123. * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1124. * assign a service ID to the caller.
1125. *
1126. * Callers should call ib_destroy_cm_id when done with the listener ID.
  1127. */
  1128. struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
  1129. ib_cm_handler cm_handler,
  1130. __be64 service_id)
  1131. {
  1132. struct cm_id_private *listen_id_priv;
  1133. struct cm_id_private *cm_id_priv;
  1134. int err = 0;
  1135. /* Create an ID in advance, since the creation may sleep */
  1136. cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
  1137. if (IS_ERR(cm_id_priv))
  1138. return ERR_CAST(cm_id_priv);
  1139. err = cm_init_listen(cm_id_priv, service_id, 0);
1140. if (err) {
1141. ib_destroy_cm_id(&cm_id_priv->id);
return ERR_PTR(err);
}
  1142. spin_lock_irq(&cm_id_priv->lock);
  1143. listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
  1144. if (listen_id_priv != cm_id_priv) {
  1145. spin_unlock_irq(&cm_id_priv->lock);
  1146. ib_destroy_cm_id(&cm_id_priv->id);
  1147. if (!listen_id_priv)
  1148. return ERR_PTR(-EINVAL);
  1149. return &listen_id_priv->id;
  1150. }
  1151. cm_id_priv->id.state = IB_CM_LISTEN;
  1152. spin_unlock_irq(&cm_id_priv->lock);
  1153. /*
  1154. * A listen ID does not need to be in the xarray since it does not
  1155. * receive mads, is not placed in the remote_id or remote_qpn rbtree,
  1156. * and does not enter timewait.
  1157. */
  1158. return &cm_id_priv->id;
  1159. }
  1160. EXPORT_SYMBOL(ib_cm_insert_listen);
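/*
 * Usage sketch (editor's illustration): unlike ib_cm_listen(), this helper
 * either creates the listener or hands back the one already registered for
 * the same device and service ID, so multiple callers can share a
 * well-known ID. Identifier names below are assumptions.
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_cm_insert_listen(device, my_cm_handler, cpu_to_be64(0x1ULL));
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	...
 *	ib_destroy_cm_id(id);	// required even when an existing ID was returned
 */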
  1161. static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
  1162. {
  1163. u64 hi_tid, low_tid;
  1164. hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
  1165. low_tid = (u64)cm_id_priv->id.local_id;
  1166. return cpu_to_be64(hi_tid | low_tid);
  1167. }
  1168. static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
  1169. __be16 attr_id, __be64 tid)
  1170. {
  1171. hdr->base_version = IB_MGMT_BASE_VERSION;
  1172. hdr->mgmt_class = IB_MGMT_CLASS_CM;
  1173. hdr->class_version = IB_CM_CLASS_VERSION;
  1174. hdr->method = IB_MGMT_METHOD_SEND;
  1175. hdr->attr_id = attr_id;
  1176. hdr->tid = tid;
  1177. }
  1178. static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
  1179. __be64 tid, u32 attr_mod)
  1180. {
  1181. cm_format_mad_hdr(hdr, attr_id, tid);
  1182. hdr->attr_mod = cpu_to_be32(attr_mod);
  1183. }
  1184. static void cm_format_req(struct cm_req_msg *req_msg,
  1185. struct cm_id_private *cm_id_priv,
  1186. struct ib_cm_req_param *param)
  1187. {
  1188. struct sa_path_rec *pri_path = param->primary_path;
  1189. struct sa_path_rec *alt_path = param->alternate_path;
  1190. bool pri_ext = false;
  1191. if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
  1192. pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
  1193. pri_path->opa.slid);
  1194. cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
  1195. cm_form_tid(cm_id_priv), param->ece.attr_mod);
  1196. IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
  1197. be32_to_cpu(cm_id_priv->id.local_id));
  1198. IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
  1199. IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
  1200. be64_to_cpu(cm_id_priv->id.device->node_guid));
  1201. IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
  1202. IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
  1203. IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
  1204. param->remote_cm_response_timeout);
  1205. cm_req_set_qp_type(req_msg, param->qp_type);
  1206. IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
  1207. IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
  1208. IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
  1209. param->local_cm_response_timeout);
  1210. IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
  1211. be16_to_cpu(param->primary_path->pkey));
  1212. IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
  1213. param->primary_path->mtu);
  1214. IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
  1215. if (param->qp_type != IB_QPT_XRC_INI) {
  1216. IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
  1217. param->responder_resources);
  1218. IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
  1219. IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
  1220. param->rnr_retry_count);
  1221. IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
  1222. }
  1223. *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
  1224. pri_path->sgid;
  1225. *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
  1226. pri_path->dgid;
  1227. if (pri_ext) {
  1228. IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
  1229. ->global.interface_id =
  1230. OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
  1231. IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
  1232. ->global.interface_id =
  1233. OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
  1234. }
  1235. if (pri_path->hop_limit <= 1) {
  1236. IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
  1237. be16_to_cpu(pri_ext ? 0 :
  1238. htons(ntohl(sa_path_get_slid(
  1239. pri_path)))));
  1240. IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
  1241. be16_to_cpu(pri_ext ? 0 :
  1242. htons(ntohl(sa_path_get_dlid(
  1243. pri_path)))));
  1244. } else {
  1245. /* Work-around until there's a way to obtain remote LID info */
  1246. IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
  1247. be16_to_cpu(IB_LID_PERMISSIVE));
  1248. IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
  1249. be16_to_cpu(IB_LID_PERMISSIVE));
  1250. }
  1251. IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
  1252. be32_to_cpu(pri_path->flow_label));
  1253. IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
  1254. IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
  1255. IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
  1256. IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
  1257. IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
  1258. (pri_path->hop_limit <= 1));
  1259. IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
  1260. cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
  1261. pri_path->packet_life_time));
  1262. if (alt_path) {
  1263. bool alt_ext = false;
  1264. if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
  1265. alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
  1266. alt_path->opa.slid);
  1267. *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
  1268. alt_path->sgid;
  1269. *IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
  1270. alt_path->dgid;
  1271. if (alt_ext) {
  1272. IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
  1273. req_msg)
  1274. ->global.interface_id =
  1275. OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
  1276. IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
  1277. req_msg)
  1278. ->global.interface_id =
  1279. OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
  1280. }
  1281. if (alt_path->hop_limit <= 1) {
  1282. IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
  1283. be16_to_cpu(
  1284. alt_ext ? 0 :
  1285. htons(ntohl(sa_path_get_slid(
  1286. alt_path)))));
  1287. IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
  1288. be16_to_cpu(
  1289. alt_ext ? 0 :
  1290. htons(ntohl(sa_path_get_dlid(
  1291. alt_path)))));
  1292. } else {
  1293. IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
  1294. be16_to_cpu(IB_LID_PERMISSIVE));
  1295. IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
  1296. be16_to_cpu(IB_LID_PERMISSIVE));
  1297. }
  1298. IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
  1299. be32_to_cpu(alt_path->flow_label));
  1300. IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
  1301. IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
  1302. alt_path->traffic_class);
  1303. IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
  1304. alt_path->hop_limit);
  1305. IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
  1306. IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
  1307. (alt_path->hop_limit <= 1));
  1308. IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
  1309. cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
  1310. alt_path->packet_life_time));
  1311. }
  1312. IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
  1313. if (param->private_data && param->private_data_len)
  1314. IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
  1315. param->private_data_len);
  1316. }
  1317. static int cm_validate_req_param(struct ib_cm_req_param *param)
  1318. {
  1319. if (!param->primary_path)
  1320. return -EINVAL;
  1321. if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
  1322. param->qp_type != IB_QPT_XRC_INI)
  1323. return -EINVAL;
  1324. if (param->private_data &&
  1325. param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
  1326. return -EINVAL;
  1327. if (param->alternate_path &&
  1328. (param->alternate_path->pkey != param->primary_path->pkey ||
  1329. param->alternate_path->mtu != param->primary_path->mtu))
  1330. return -EINVAL;
  1331. return 0;
  1332. }
  1333. int ib_send_cm_req(struct ib_cm_id *cm_id,
  1334. struct ib_cm_req_param *param)
  1335. {
  1336. struct cm_id_private *cm_id_priv;
  1337. struct cm_req_msg *req_msg;
  1338. unsigned long flags;
  1339. int ret;
  1340. ret = cm_validate_req_param(param);
  1341. if (ret)
  1342. return ret;
  1343. /* Verify that we're not in timewait. */
  1344. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  1345. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1346. if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
  1347. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1348. ret = -EINVAL;
  1349. goto out;
  1350. }
  1351. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1352. cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
  1353. id.local_id);
  1354. if (IS_ERR(cm_id_priv->timewait_info)) {
  1355. ret = PTR_ERR(cm_id_priv->timewait_info);
  1356. cm_id_priv->timewait_info = NULL;
  1357. goto out;
  1358. }
  1359. ret = cm_init_av_by_path(param->primary_path,
  1360. param->ppath_sgid_attr, &cm_id_priv->av,
  1361. cm_id_priv);
  1362. if (ret)
  1363. goto out;
  1364. if (param->alternate_path) {
  1365. ret = cm_init_av_by_path(param->alternate_path, NULL,
  1366. &cm_id_priv->alt_av, cm_id_priv);
  1367. if (ret)
  1368. goto out;
  1369. }
  1370. cm_id->service_id = param->service_id;
  1371. cm_id->service_mask = ~cpu_to_be64(0);
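/*
 * Editor's note (worked example, assuming cm_convert_to_ms() maps the IBTA
 * 4.096us * 2^n timeout encoding to roughly 2^(n-8) ms): a packet_life_time
 * of 18 (~1 s) and a remote_cm_response_timeout of 20 (~4 s) yield a
 * timeout_ms of about 2 * 1024 + 4096 = 6144 ms between MAD retries of the
 * REQ below.
 */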
  1372. cm_id_priv->timeout_ms = cm_convert_to_ms(
  1373. param->primary_path->packet_life_time) * 2 +
  1374. cm_convert_to_ms(
  1375. param->remote_cm_response_timeout);
  1376. cm_id_priv->max_cm_retries = param->max_cm_retries;
  1377. cm_id_priv->initiator_depth = param->initiator_depth;
  1378. cm_id_priv->responder_resources = param->responder_resources;
  1379. cm_id_priv->retry_count = param->retry_count;
  1380. cm_id_priv->path_mtu = param->primary_path->mtu;
  1381. cm_id_priv->pkey = param->primary_path->pkey;
  1382. cm_id_priv->qp_type = param->qp_type;
  1383. ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
  1384. if (ret)
  1385. goto out;
  1386. req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
  1387. cm_format_req(req_msg, cm_id_priv, param);
  1388. cm_id_priv->tid = req_msg->hdr.tid;
  1389. cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
  1390. cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
  1391. cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
  1392. cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
  1393. trace_icm_send_req(&cm_id_priv->id);
  1394. spin_lock_irqsave(&cm_id_priv->lock, flags);
  1395. ret = ib_post_send_mad(cm_id_priv->msg, NULL);
  1396. if (ret) {
  1397. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1398. goto error2;
  1399. }
  1400. BUG_ON(cm_id->state != IB_CM_IDLE);
  1401. cm_id->state = IB_CM_REQ_SENT;
  1402. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  1403. return 0;
  1404. error2: cm_free_msg(cm_id_priv->msg);
  1405. out: return ret;
  1406. }
  1407. EXPORT_SYMBOL(ib_send_cm_req);
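/*
 * Usage sketch (editor's illustration): the active side fills in a
 * struct ib_cm_req_param and posts the REQ. The path record is assumed to
 * have been resolved already (for example through an SA query); only some
 * of the fields consumed by cm_format_req() above are shown, and the
 * values are illustrative.
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path			= &path_rec,
 *		.service_id			= cpu_to_be64(0x10ce0001ULL),
 *		.qp_num				= my_qp->qp_num,
 *		.qp_type			= IB_QPT_RC,
 *		.starting_psn			= 0x123456,
 *		.responder_resources		= 4,
 *		.initiator_depth		= 4,
 *		.remote_cm_response_timeout	= 20,
 *		.local_cm_response_timeout	= 20,
 *		.retry_count			= 7,
 *		.rnr_retry_count		= 7,
 *		.max_cm_retries			= 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &req);
 */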
  1408. static int cm_issue_rej(struct cm_port *port,
  1409. struct ib_mad_recv_wc *mad_recv_wc,
  1410. enum ib_cm_rej_reason reason,
  1411. enum cm_msg_response msg_rejected,
  1412. void *ari, u8 ari_length)
  1413. {
  1414. struct ib_mad_send_buf *msg = NULL;
  1415. struct cm_rej_msg *rej_msg, *rcv_msg;
  1416. int ret;
  1417. ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
  1418. if (ret)
  1419. return ret;
  1420. /* We just need common CM header information. Cast to any message. */
  1421. rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
  1422. rej_msg = (struct cm_rej_msg *) msg->mad;
  1423. cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
  1424. IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
  1425. IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
  1426. IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
  1427. IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
  1428. IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
  1429. IBA_SET(CM_REJ_REASON, rej_msg, reason);
  1430. if (ari && ari_length) {
  1431. IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
  1432. IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
  1433. }
  1434. trace_icm_issue_rej(
  1435. IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
  1436. IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
  1437. ret = ib_post_send_mad(msg, NULL);
  1438. if (ret)
  1439. cm_free_msg(msg);
  1440. return ret;
  1441. }
  1442. static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
  1443. {
  1444. return ((cpu_to_be16(
  1445. IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
  1446. (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
  1447. req_msg))));
  1448. }
  1449. static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
  1450. struct sa_path_rec *path, union ib_gid *gid)
  1451. {
  1452. if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
  1453. path->rec_type = SA_PATH_REC_TYPE_OPA;
  1454. else
  1455. path->rec_type = SA_PATH_REC_TYPE_IB;
  1456. }
  1457. static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
  1458. struct sa_path_rec *primary_path,
  1459. struct sa_path_rec *alt_path)
  1460. {
  1461. u32 lid;
  1462. if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
  1463. sa_path_set_dlid(primary_path,
  1464. IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
  1465. req_msg));
  1466. sa_path_set_slid(primary_path,
  1467. IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
  1468. req_msg));
  1469. } else {
  1470. lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
  1471. CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
  1472. sa_path_set_dlid(primary_path, lid);
  1473. lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
  1474. CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
  1475. sa_path_set_slid(primary_path, lid);
  1476. }
  1477. if (!cm_req_has_alt_path(req_msg))
  1478. return;
  1479. if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
  1480. sa_path_set_dlid(alt_path,
  1481. IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
  1482. req_msg));
  1483. sa_path_set_slid(alt_path,
  1484. IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
  1485. req_msg));
  1486. } else {
  1487. lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
  1488. CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
  1489. sa_path_set_dlid(alt_path, lid);
  1490. lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
  1491. CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
  1492. sa_path_set_slid(alt_path, lid);
  1493. }
  1494. }
  1495. static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
  1496. struct sa_path_rec *primary_path,
  1497. struct sa_path_rec *alt_path)
  1498. {
  1499. primary_path->dgid =
  1500. *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
  1501. primary_path->sgid =
  1502. *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
  1503. primary_path->flow_label =
  1504. cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
  1505. primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
  1506. primary_path->traffic_class =
  1507. IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
  1508. primary_path->reversible = 1;
  1509. primary_path->pkey =
  1510. cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
  1511. primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
  1512. primary_path->mtu_selector = IB_SA_EQ;
  1513. primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
  1514. primary_path->rate_selector = IB_SA_EQ;
  1515. primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
  1516. primary_path->packet_life_time_selector = IB_SA_EQ;
  1517. primary_path->packet_life_time =
  1518. IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
  1519. primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
  1520. primary_path->service_id =
  1521. cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
  1522. if (sa_path_is_roce(primary_path))
  1523. primary_path->roce.route_resolved = false;
  1524. if (cm_req_has_alt_path(req_msg)) {
  1525. alt_path->dgid = *IBA_GET_MEM_PTR(
  1526. CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
  1527. alt_path->sgid = *IBA_GET_MEM_PTR(
  1528. CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
  1529. alt_path->flow_label = cpu_to_be32(
  1530. IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
  1531. alt_path->hop_limit =
  1532. IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
  1533. alt_path->traffic_class =
  1534. IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
  1535. alt_path->reversible = 1;
  1536. alt_path->pkey =
  1537. cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
  1538. alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
  1539. alt_path->mtu_selector = IB_SA_EQ;
  1540. alt_path->mtu =
  1541. IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
  1542. alt_path->rate_selector = IB_SA_EQ;
  1543. alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
  1544. alt_path->packet_life_time_selector = IB_SA_EQ;
  1545. alt_path->packet_life_time =
  1546. IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
  1547. alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
  1548. alt_path->service_id =
  1549. cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
  1550. if (sa_path_is_roce(alt_path))
  1551. alt_path->roce.route_resolved = false;
  1552. }
  1553. cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
  1554. }
  1555. static u16 cm_get_bth_pkey(struct cm_work *work)
  1556. {
  1557. struct ib_device *ib_dev = work->port->cm_dev->ib_device;
  1558. u8 port_num = work->port->port_num;
  1559. u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
  1560. u16 pkey;
  1561. int ret;
  1562. ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
  1563. if (ret) {
  1564. dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
  1565. port_num, pkey_index, ret);
  1566. return 0;
  1567. }
  1568. return pkey;
  1569. }
  1570. /**
1571. * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
  1572. * ULPs (such as IPoIB) do not understand OPA GIDs and will
  1573. * reject them as the local_gid will not match the sgid. Therefore,
  1574. * change the pathrec's SGID to an IB SGID.
  1575. *
  1576. * @work: Work completion
  1577. * @path: Path record
  1578. */
  1579. static void cm_opa_to_ib_sgid(struct cm_work *work,
  1580. struct sa_path_rec *path)
  1581. {
  1582. struct ib_device *dev = work->port->cm_dev->ib_device;
  1583. u8 port_num = work->port->port_num;
  1584. if (rdma_cap_opa_ah(dev, port_num) &&
  1585. (ib_is_opa_gid(&path->sgid))) {
  1586. union ib_gid sgid;
  1587. if (rdma_query_gid(dev, port_num, 0, &sgid)) {
  1588. dev_warn(&dev->dev,
  1589. "Error updating sgid in CM request\n");
  1590. return;
  1591. }
  1592. path->sgid = sgid;
  1593. }
  1594. }
  1595. static void cm_format_req_event(struct cm_work *work,
  1596. struct cm_id_private *cm_id_priv,
  1597. struct ib_cm_id *listen_id)
  1598. {
  1599. struct cm_req_msg *req_msg;
  1600. struct ib_cm_req_event_param *param;
  1601. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1602. param = &work->cm_event.param.req_rcvd;
  1603. param->listen_id = listen_id;
  1604. param->bth_pkey = cm_get_bth_pkey(work);
  1605. param->port = cm_id_priv->av.port->port_num;
  1606. param->primary_path = &work->path[0];
  1607. cm_opa_to_ib_sgid(work, param->primary_path);
  1608. if (cm_req_has_alt_path(req_msg)) {
  1609. param->alternate_path = &work->path[1];
  1610. cm_opa_to_ib_sgid(work, param->alternate_path);
  1611. } else {
  1612. param->alternate_path = NULL;
  1613. }
  1614. param->remote_ca_guid =
  1615. cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
  1616. param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
  1617. param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
  1618. param->qp_type = cm_req_get_qp_type(req_msg);
  1619. param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
  1620. param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
  1621. param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
  1622. param->local_cm_response_timeout =
  1623. IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
  1624. param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
  1625. param->remote_cm_response_timeout =
  1626. IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
  1627. param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
  1628. param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
  1629. param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
  1630. param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
  1631. param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
  1632. param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
  1633. work->cm_event.private_data =
  1634. IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
  1635. }
  1636. static void cm_process_work(struct cm_id_private *cm_id_priv,
  1637. struct cm_work *work)
  1638. {
  1639. int ret;
  1640. /* We will typically only have the current event to report. */
  1641. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
  1642. cm_free_work(work);
  1643. while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
  1644. spin_lock_irq(&cm_id_priv->lock);
  1645. work = cm_dequeue_work(cm_id_priv);
  1646. spin_unlock_irq(&cm_id_priv->lock);
  1647. if (!work)
  1648. return;
  1649. ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
  1650. &work->cm_event);
  1651. cm_free_work(work);
  1652. }
  1653. cm_deref_id(cm_id_priv);
  1654. if (ret)
  1655. cm_destroy_id(&cm_id_priv->id, ret);
  1656. }
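/*
 * Editor's note: as the loop above shows, a non-zero return from the
 * consumer's cm_handler causes the CM to call cm_destroy_id() on the
 * consumer's behalf. Returning an error is therefore the way to tear down
 * a cm_id from within its own callback; the ib_cm API does not allow
 * calling ib_destroy_cm_id() directly from that context.
 */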
  1657. static void cm_format_mra(struct cm_mra_msg *mra_msg,
  1658. struct cm_id_private *cm_id_priv,
  1659. enum cm_msg_response msg_mraed, u8 service_timeout,
  1660. const void *private_data, u8 private_data_len)
  1661. {
  1662. cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
  1663. IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
  1664. IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
  1665. be32_to_cpu(cm_id_priv->id.local_id));
  1666. IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
  1667. be32_to_cpu(cm_id_priv->id.remote_id));
  1668. IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
  1669. if (private_data && private_data_len)
  1670. IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
  1671. private_data_len);
  1672. }
  1673. static void cm_format_rej(struct cm_rej_msg *rej_msg,
  1674. struct cm_id_private *cm_id_priv,
  1675. enum ib_cm_rej_reason reason, void *ari,
  1676. u8 ari_length, const void *private_data,
  1677. u8 private_data_len, enum ib_cm_state state)
  1678. {
  1679. lockdep_assert_held(&cm_id_priv->lock);
  1680. cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
  1681. IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
  1682. be32_to_cpu(cm_id_priv->id.remote_id));
  1683. switch (state) {
  1684. case IB_CM_REQ_RCVD:
  1685. IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
  1686. IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
  1687. break;
  1688. case IB_CM_MRA_REQ_SENT:
  1689. IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
  1690. be32_to_cpu(cm_id_priv->id.local_id));
  1691. IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
  1692. break;
  1693. case IB_CM_REP_RCVD:
  1694. case IB_CM_MRA_REP_SENT:
  1695. IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
  1696. be32_to_cpu(cm_id_priv->id.local_id));
  1697. IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
  1698. break;
  1699. default:
  1700. IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
  1701. be32_to_cpu(cm_id_priv->id.local_id));
  1702. IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
  1703. CM_MSG_RESPONSE_OTHER);
  1704. break;
  1705. }
  1706. IBA_SET(CM_REJ_REASON, rej_msg, reason);
  1707. if (ari && ari_length) {
  1708. IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
  1709. IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
  1710. }
  1711. if (private_data && private_data_len)
  1712. IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
  1713. private_data_len);
  1714. }
  1715. static void cm_dup_req_handler(struct cm_work *work,
  1716. struct cm_id_private *cm_id_priv)
  1717. {
  1718. struct ib_mad_send_buf *msg = NULL;
  1719. int ret;
  1720. atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
  1721. counter[CM_REQ_COUNTER]);
  1722. /* Quick state check to discard duplicate REQs. */
  1723. spin_lock_irq(&cm_id_priv->lock);
  1724. if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
  1725. spin_unlock_irq(&cm_id_priv->lock);
  1726. return;
  1727. }
  1728. spin_unlock_irq(&cm_id_priv->lock);
  1729. ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
  1730. if (ret)
  1731. return;
  1732. spin_lock_irq(&cm_id_priv->lock);
  1733. switch (cm_id_priv->id.state) {
  1734. case IB_CM_MRA_REQ_SENT:
  1735. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  1736. CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
  1737. cm_id_priv->private_data,
  1738. cm_id_priv->private_data_len);
  1739. break;
  1740. case IB_CM_TIMEWAIT:
  1741. cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
  1742. IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
  1743. IB_CM_TIMEWAIT);
  1744. break;
  1745. default:
  1746. goto unlock;
  1747. }
  1748. spin_unlock_irq(&cm_id_priv->lock);
  1749. trace_icm_send_dup_req(&cm_id_priv->id);
  1750. ret = ib_post_send_mad(msg, NULL);
  1751. if (ret)
  1752. goto free;
  1753. return;
  1754. unlock: spin_unlock_irq(&cm_id_priv->lock);
  1755. free: cm_free_msg(msg);
  1756. }
  1757. static struct cm_id_private * cm_match_req(struct cm_work *work,
  1758. struct cm_id_private *cm_id_priv)
  1759. {
  1760. struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
  1761. struct cm_timewait_info *timewait_info;
  1762. struct cm_req_msg *req_msg;
  1763. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1764. /* Check for possible duplicate REQ. */
  1765. spin_lock_irq(&cm.lock);
  1766. timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
  1767. if (timewait_info) {
  1768. cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
  1769. timewait_info->work.remote_id);
  1770. spin_unlock_irq(&cm.lock);
  1771. if (cur_cm_id_priv) {
  1772. cm_dup_req_handler(work, cur_cm_id_priv);
  1773. cm_deref_id(cur_cm_id_priv);
  1774. }
  1775. return NULL;
  1776. }
  1777. /* Check for stale connections. */
  1778. timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
  1779. if (timewait_info) {
  1780. cm_remove_remote(cm_id_priv);
  1781. cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
  1782. timewait_info->work.remote_id);
  1783. spin_unlock_irq(&cm.lock);
  1784. cm_issue_rej(work->port, work->mad_recv_wc,
  1785. IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
  1786. NULL, 0);
  1787. if (cur_cm_id_priv) {
  1788. ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
  1789. cm_deref_id(cur_cm_id_priv);
  1790. }
  1791. return NULL;
  1792. }
  1793. /* Find matching listen request. */
  1794. listen_cm_id_priv = cm_find_listen(
  1795. cm_id_priv->id.device,
  1796. cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
  1797. if (!listen_cm_id_priv) {
  1798. cm_remove_remote(cm_id_priv);
  1799. spin_unlock_irq(&cm.lock);
  1800. cm_issue_rej(work->port, work->mad_recv_wc,
  1801. IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
  1802. NULL, 0);
  1803. return NULL;
  1804. }
  1805. spin_unlock_irq(&cm.lock);
  1806. return listen_cm_id_priv;
  1807. }
  1808. /*
  1809. * Work-around for inter-subnet connections. If the LIDs are permissive,
  1810. * we need to override the LID/SL data in the REQ with the LID information
  1811. * in the work completion.
  1812. */
  1813. static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
  1814. {
  1815. if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
  1816. if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
  1817. req_msg)) == IB_LID_PERMISSIVE) {
  1818. IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
  1819. be16_to_cpu(ib_lid_be16(wc->slid)));
  1820. IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
  1821. }
  1822. if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
  1823. req_msg)) == IB_LID_PERMISSIVE)
  1824. IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
  1825. wc->dlid_path_bits);
  1826. }
  1827. if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
  1828. if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
  1829. req_msg)) == IB_LID_PERMISSIVE) {
  1830. IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
  1831. be16_to_cpu(ib_lid_be16(wc->slid)));
  1832. IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
  1833. }
  1834. if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
  1835. req_msg)) == IB_LID_PERMISSIVE)
  1836. IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
  1837. wc->dlid_path_bits);
  1838. }
  1839. }
  1840. static int cm_req_handler(struct cm_work *work)
  1841. {
  1842. struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
  1843. struct cm_req_msg *req_msg;
  1844. const struct ib_global_route *grh;
  1845. const struct ib_gid_attr *gid_attr;
  1846. int ret;
  1847. req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
  1848. cm_id_priv =
  1849. cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
  1850. if (IS_ERR(cm_id_priv))
  1851. return PTR_ERR(cm_id_priv);
  1852. cm_id_priv->id.remote_id =
  1853. cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
  1854. cm_id_priv->id.service_id =
  1855. cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
  1856. cm_id_priv->id.service_mask = ~cpu_to_be64(0);
  1857. cm_id_priv->tid = req_msg->hdr.tid;
  1858. cm_id_priv->timeout_ms = cm_convert_to_ms(
  1859. IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
  1860. cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
  1861. cm_id_priv->remote_qpn =
  1862. cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
  1863. cm_id_priv->initiator_depth =
  1864. IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
  1865. cm_id_priv->responder_resources =
  1866. IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
  1867. cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
  1868. cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
  1869. cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
  1870. cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
  1871. cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
  1872. cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
  1873. ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
  1874. work->mad_recv_wc->recv_buf.grh,
  1875. &cm_id_priv->av);
  1876. if (ret)
  1877. goto destroy;
  1878. cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
  1879. id.local_id);
  1880. if (IS_ERR(cm_id_priv->timewait_info)) {
  1881. ret = PTR_ERR(cm_id_priv->timewait_info);
  1882. cm_id_priv->timewait_info = NULL;
  1883. goto destroy;
  1884. }
  1885. cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
  1886. cm_id_priv->timewait_info->remote_ca_guid =
  1887. cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
  1888. cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
  1889. /*
  1890. * Note that the ID pointer is not in the xarray at this point,
  1891. * so this set is only visible to the local thread.
  1892. */
  1893. cm_id_priv->id.state = IB_CM_REQ_RCVD;
  1894. listen_cm_id_priv = cm_match_req(work, cm_id_priv);
  1895. if (!listen_cm_id_priv) {
  1896. trace_icm_no_listener_err(&cm_id_priv->id);
  1897. cm_id_priv->id.state = IB_CM_IDLE;
  1898. ret = -EINVAL;
  1899. goto destroy;
  1900. }
  1901. if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
  1902. cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
  1903. memset(&work->path[0], 0, sizeof(work->path[0]));
  1904. if (cm_req_has_alt_path(req_msg))
  1905. memset(&work->path[1], 0, sizeof(work->path[1]));
  1906. grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
  1907. gid_attr = grh->sgid_attr;
  1908. if (gid_attr &&
  1909. rdma_protocol_roce(work->port->cm_dev->ib_device,
  1910. work->port->port_num)) {
  1911. work->path[0].rec_type =
  1912. sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
  1913. } else {
  1914. cm_path_set_rec_type(
  1915. work->port->cm_dev->ib_device, work->port->port_num,
  1916. &work->path[0],
  1917. IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
  1918. req_msg));
  1919. }
  1920. if (cm_req_has_alt_path(req_msg))
  1921. work->path[1].rec_type = work->path[0].rec_type;
  1922. cm_format_paths_from_req(req_msg, &work->path[0],
  1923. &work->path[1]);
  1924. if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
  1925. sa_path_set_dmac(&work->path[0],
  1926. cm_id_priv->av.ah_attr.roce.dmac);
  1927. work->path[0].hop_limit = grh->hop_limit;
  1928. ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
  1929. cm_id_priv);
  1930. if (ret) {
  1931. int err;
  1932. err = rdma_query_gid(work->port->cm_dev->ib_device,
  1933. work->port->port_num, 0,
  1934. &work->path[0].sgid);
  1935. if (err)
  1936. ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
  1937. NULL, 0, NULL, 0);
  1938. else
  1939. ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
  1940. &work->path[0].sgid,
  1941. sizeof(work->path[0].sgid),
  1942. NULL, 0);
  1943. goto rejected;
  1944. }
  1945. if (cm_req_has_alt_path(req_msg)) {
  1946. ret = cm_init_av_by_path(&work->path[1], NULL,
  1947. &cm_id_priv->alt_av, cm_id_priv);
  1948. if (ret) {
  1949. ib_send_cm_rej(&cm_id_priv->id,
  1950. IB_CM_REJ_INVALID_ALT_GID,
  1951. &work->path[0].sgid,
  1952. sizeof(work->path[0].sgid), NULL, 0);
  1953. goto rejected;
  1954. }
  1955. }
  1956. cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
  1957. cm_id_priv->id.context = listen_cm_id_priv->id.context;
  1958. cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
  1959. /* Now MAD handlers can see the new ID */
  1960. spin_lock_irq(&cm_id_priv->lock);
  1961. cm_finalize_id(cm_id_priv);
  1962. /* Refcount belongs to the event, pairs with cm_process_work() */
  1963. refcount_inc(&cm_id_priv->refcount);
  1964. cm_queue_work_unlock(cm_id_priv, work);
  1965. /*
  1966. * Since this ID was just created and was not made visible to other MAD
  1967. * handlers until the cm_finalize_id() above we know that the
  1968. * cm_process_work() will deliver the event and the listen_cm_id
  1969. * embedded in the event can be derefed here.
  1970. */
  1971. cm_deref_id(listen_cm_id_priv);
  1972. return 0;
  1973. rejected:
  1974. cm_deref_id(listen_cm_id_priv);
  1975. destroy:
  1976. ib_destroy_cm_id(&cm_id_priv->id);
  1977. return ret;
  1978. }
  1979. static void cm_format_rep(struct cm_rep_msg *rep_msg,
  1980. struct cm_id_private *cm_id_priv,
  1981. struct ib_cm_rep_param *param)
  1982. {
  1983. cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
  1984. param->ece.attr_mod);
  1985. IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
  1986. be32_to_cpu(cm_id_priv->id.local_id));
  1987. IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
  1988. be32_to_cpu(cm_id_priv->id.remote_id));
  1989. IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
  1990. IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
  1991. param->responder_resources);
  1992. IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
  1993. cm_id_priv->av.port->cm_dev->ack_delay);
  1994. IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
  1995. IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
  1996. IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
  1997. be64_to_cpu(cm_id_priv->id.device->node_guid));
  1998. if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
  1999. IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
  2000. param->initiator_depth);
  2001. IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
  2002. param->flow_control);
  2003. IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
  2004. IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
  2005. } else {
  2006. IBA_SET(CM_REP_SRQ, rep_msg, 1);
  2007. IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
  2008. }
  2009. IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
  2010. IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
  2011. IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
  2012. if (param->private_data && param->private_data_len)
  2013. IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
  2014. param->private_data_len);
  2015. }
  2016. int ib_send_cm_rep(struct ib_cm_id *cm_id,
  2017. struct ib_cm_rep_param *param)
  2018. {
  2019. struct cm_id_private *cm_id_priv;
  2020. struct ib_mad_send_buf *msg;
  2021. struct cm_rep_msg *rep_msg;
  2022. unsigned long flags;
  2023. int ret;
  2024. if (param->private_data &&
  2025. param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
  2026. return -EINVAL;
  2027. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2028. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2029. if (cm_id->state != IB_CM_REQ_RCVD &&
  2030. cm_id->state != IB_CM_MRA_REQ_SENT) {
  2031. trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
  2032. ret = -EINVAL;
  2033. goto out;
  2034. }
  2035. ret = cm_alloc_msg(cm_id_priv, &msg);
  2036. if (ret)
  2037. goto out;
  2038. rep_msg = (struct cm_rep_msg *) msg->mad;
  2039. cm_format_rep(rep_msg, cm_id_priv, param);
  2040. msg->timeout_ms = cm_id_priv->timeout_ms;
  2041. msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
  2042. trace_icm_send_rep(cm_id);
  2043. ret = ib_post_send_mad(msg, NULL);
  2044. if (ret) {
  2045. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2046. cm_free_msg(msg);
  2047. return ret;
  2048. }
  2049. cm_id->state = IB_CM_REP_SENT;
  2050. cm_id_priv->msg = msg;
  2051. cm_id_priv->initiator_depth = param->initiator_depth;
  2052. cm_id_priv->responder_resources = param->responder_resources;
  2053. cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
  2054. WARN_ONCE(param->qp_num & 0xFF000000,
  2055. "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
  2056. param->qp_num);
  2057. cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
  2058. out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2059. return ret;
  2060. }
  2061. EXPORT_SYMBOL(ib_send_cm_rep);
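/*
 * Usage sketch (editor's illustration): the passive side typically sends
 * the REP from its cm_handler once IB_CM_REQ_RECEIVED arrives and its QP
 * has been created. Only fields consumed by cm_format_rep() above are
 * shown; values are illustrative.
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= my_qp->qp_num,
 *		.starting_psn		= 0x654321,
 *		.responder_resources	= event->param.req_rcvd.responder_resources,
 *		.initiator_depth	= event->param.req_rcvd.initiator_depth,
 *		.rnr_retry_count	= 7,
 *		.flow_control		= 1,
 *	};
 *
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */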
  2062. static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
  2063. struct cm_id_private *cm_id_priv,
  2064. const void *private_data,
  2065. u8 private_data_len)
  2066. {
  2067. cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
  2068. IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
  2069. be32_to_cpu(cm_id_priv->id.local_id));
  2070. IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
  2071. be32_to_cpu(cm_id_priv->id.remote_id));
  2072. if (private_data && private_data_len)
  2073. IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
  2074. private_data_len);
  2075. }
  2076. int ib_send_cm_rtu(struct ib_cm_id *cm_id,
  2077. const void *private_data,
  2078. u8 private_data_len)
  2079. {
  2080. struct cm_id_private *cm_id_priv;
  2081. struct ib_mad_send_buf *msg;
  2082. unsigned long flags;
  2083. void *data;
  2084. int ret;
  2085. if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
  2086. return -EINVAL;
  2087. data = cm_copy_private_data(private_data, private_data_len);
  2088. if (IS_ERR(data))
  2089. return PTR_ERR(data);
  2090. cm_id_priv = container_of(cm_id, struct cm_id_private, id);
  2091. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2092. if (cm_id->state != IB_CM_REP_RCVD &&
  2093. cm_id->state != IB_CM_MRA_REP_SENT) {
  2094. trace_icm_send_cm_rtu_err(cm_id);
  2095. ret = -EINVAL;
  2096. goto error;
  2097. }
  2098. ret = cm_alloc_msg(cm_id_priv, &msg);
  2099. if (ret)
  2100. goto error;
  2101. cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
  2102. private_data, private_data_len);
  2103. trace_icm_send_rtu(cm_id);
  2104. ret = ib_post_send_mad(msg, NULL);
  2105. if (ret) {
  2106. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2107. cm_free_msg(msg);
  2108. kfree(data);
  2109. return ret;
  2110. }
  2111. cm_id->state = IB_CM_ESTABLISHED;
  2112. cm_set_private_data(cm_id_priv, data, private_data_len);
  2113. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2114. return 0;
  2115. error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2116. kfree(data);
  2117. return ret;
  2118. }
  2119. EXPORT_SYMBOL(ib_send_cm_rtu);
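/*
 * Usage sketch (editor's illustration): on the active side the handler
 * finishes the handshake when the REP arrives, typically after moving its
 * QP to RTS with attributes obtained from ib_cm_init_qp_attr():
 *
 *	case IB_CM_REP_RECEIVED:
 *		... transition the QP through INIT/RTR/RTS ...
 *		ib_send_cm_rtu(cm_id, NULL, 0);
 *		break;
 */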
  2120. static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
  2121. {
  2122. struct cm_rep_msg *rep_msg;
  2123. struct ib_cm_rep_event_param *param;
  2124. rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
  2125. param = &work->cm_event.param.rep_rcvd;
  2126. param->remote_ca_guid =
  2127. cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
  2128. param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
  2129. param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
  2130. param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
  2131. param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
  2132. param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
  2133. param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
  2134. param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
  2135. param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
  2136. param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
  2137. param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
  2138. param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
  2139. param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
  2140. param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
  2141. param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
  2142. work->cm_event.private_data =
  2143. IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
  2144. }
  2145. static void cm_dup_rep_handler(struct cm_work *work)
  2146. {
  2147. struct cm_id_private *cm_id_priv;
  2148. struct cm_rep_msg *rep_msg;
  2149. struct ib_mad_send_buf *msg = NULL;
  2150. int ret;
  2151. rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
  2152. cm_id_priv = cm_acquire_id(
  2153. cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
  2154. cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
  2155. if (!cm_id_priv)
  2156. return;
  2157. atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
  2158. counter[CM_REP_COUNTER]);
  2159. ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
  2160. if (ret)
  2161. goto deref;
  2162. spin_lock_irq(&cm_id_priv->lock);
  2163. if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
  2164. cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
  2165. cm_id_priv->private_data,
  2166. cm_id_priv->private_data_len);
  2167. else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
  2168. cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
  2169. CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
  2170. cm_id_priv->private_data,
  2171. cm_id_priv->private_data_len);
  2172. else
  2173. goto unlock;
  2174. spin_unlock_irq(&cm_id_priv->lock);
  2175. trace_icm_send_dup_rep(&cm_id_priv->id);
  2176. ret = ib_post_send_mad(msg, NULL);
  2177. if (ret)
  2178. goto free;
  2179. goto deref;
  2180. unlock: spin_unlock_irq(&cm_id_priv->lock);
  2181. free: cm_free_msg(msg);
  2182. deref: cm_deref_id(cm_id_priv);
  2183. }
  2184. static int cm_rep_handler(struct cm_work *work)
  2185. {
  2186. struct cm_id_private *cm_id_priv;
  2187. struct cm_rep_msg *rep_msg;
  2188. int ret;
  2189. struct cm_id_private *cur_cm_id_priv;
  2190. struct cm_timewait_info *timewait_info;
  2191. rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
  2192. cm_id_priv = cm_acquire_id(
  2193. cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
  2194. if (!cm_id_priv) {
  2195. cm_dup_rep_handler(work);
  2196. trace_icm_remote_no_priv_err(
  2197. IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
  2198. return -EINVAL;
  2199. }
  2200. cm_format_rep_event(work, cm_id_priv->qp_type);
  2201. spin_lock_irq(&cm_id_priv->lock);
  2202. switch (cm_id_priv->id.state) {
  2203. case IB_CM_REQ_SENT:
  2204. case IB_CM_MRA_REQ_RCVD:
  2205. break;
  2206. default:
  2207. ret = -EINVAL;
  2208. trace_icm_rep_unknown_err(
  2209. IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
  2210. IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
  2211. cm_id_priv->id.state);
  2212. spin_unlock_irq(&cm_id_priv->lock);
  2213. goto error;
  2214. }
  2215. cm_id_priv->timewait_info->work.remote_id =
  2216. cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
  2217. cm_id_priv->timewait_info->remote_ca_guid =
  2218. cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
  2219. cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
  2220. spin_lock(&cm.lock);
  2221. /* Check for duplicate REP. */
  2222. if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
  2223. spin_unlock(&cm.lock);
  2224. spin_unlock_irq(&cm_id_priv->lock);
  2225. ret = -EINVAL;
  2226. trace_icm_insert_failed_err(
  2227. IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
  2228. goto error;
  2229. }
  2230. /* Check for a stale connection. */
  2231. timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
  2232. if (timewait_info) {
  2233. cm_remove_remote(cm_id_priv);
  2234. cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
  2235. timewait_info->work.remote_id);
  2236. spin_unlock(&cm.lock);
  2237. spin_unlock_irq(&cm_id_priv->lock);
  2238. cm_issue_rej(work->port, work->mad_recv_wc,
  2239. IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
  2240. NULL, 0);
  2241. ret = -EINVAL;
  2242. trace_icm_staleconn_err(
  2243. IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
  2244. IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
  2245. if (cur_cm_id_priv) {
  2246. ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
  2247. cm_deref_id(cur_cm_id_priv);
  2248. }
  2249. goto error;
  2250. }
  2251. spin_unlock(&cm.lock);
  2252. cm_id_priv->id.state = IB_CM_REP_RCVD;
  2253. cm_id_priv->id.remote_id =
  2254. cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
  2255. cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
  2256. cm_id_priv->initiator_depth =
  2257. IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
  2258. cm_id_priv->responder_resources =
  2259. IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
  2260. cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
  2261. cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
  2262. cm_id_priv->target_ack_delay =
  2263. IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
  2264. cm_id_priv->av.timeout =
  2265. cm_ack_timeout(cm_id_priv->target_ack_delay,
  2266. cm_id_priv->av.timeout - 1);
  2267. cm_id_priv->alt_av.timeout =
  2268. cm_ack_timeout(cm_id_priv->target_ack_delay,
  2269. cm_id_priv->alt_av.timeout - 1);
  2270. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  2271. cm_queue_work_unlock(cm_id_priv, work);
  2272. return 0;
  2273. error:
  2274. cm_deref_id(cm_id_priv);
  2275. return ret;
  2276. }
  2277. static int cm_establish_handler(struct cm_work *work)
  2278. {
  2279. struct cm_id_private *cm_id_priv;
  2280. /* See comment in cm_establish about lookup. */
  2281. cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
  2282. if (!cm_id_priv)
  2283. return -EINVAL;
  2284. spin_lock_irq(&cm_id_priv->lock);
  2285. if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
  2286. spin_unlock_irq(&cm_id_priv->lock);
  2287. goto out;
  2288. }
  2289. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  2290. cm_queue_work_unlock(cm_id_priv, work);
  2291. return 0;
  2292. out:
  2293. cm_deref_id(cm_id_priv);
  2294. return -EINVAL;
  2295. }
  2296. static int cm_rtu_handler(struct cm_work *work)
  2297. {
  2298. struct cm_id_private *cm_id_priv;
  2299. struct cm_rtu_msg *rtu_msg;
  2300. rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
  2301. cm_id_priv = cm_acquire_id(
  2302. cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
  2303. cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
  2304. if (!cm_id_priv)
  2305. return -EINVAL;
  2306. work->cm_event.private_data =
  2307. IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
  2308. spin_lock_irq(&cm_id_priv->lock);
  2309. if (cm_id_priv->id.state != IB_CM_REP_SENT &&
  2310. cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
  2311. spin_unlock_irq(&cm_id_priv->lock);
  2312. atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
  2313. counter[CM_RTU_COUNTER]);
  2314. goto out;
  2315. }
  2316. cm_id_priv->id.state = IB_CM_ESTABLISHED;
  2317. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  2318. cm_queue_work_unlock(cm_id_priv, work);
  2319. return 0;
  2320. out:
  2321. cm_deref_id(cm_id_priv);
  2322. return -EINVAL;
  2323. }
  2324. static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
  2325. struct cm_id_private *cm_id_priv,
  2326. const void *private_data,
  2327. u8 private_data_len)
  2328. {
  2329. cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
  2330. cm_form_tid(cm_id_priv));
  2331. IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
  2332. be32_to_cpu(cm_id_priv->id.local_id));
  2333. IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
  2334. be32_to_cpu(cm_id_priv->id.remote_id));
  2335. IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
  2336. be32_to_cpu(cm_id_priv->remote_qpn));
  2337. if (private_data && private_data_len)
  2338. IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
  2339. private_data_len);
  2340. }
  2341. static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
  2342. const void *private_data, u8 private_data_len)
  2343. {
  2344. struct ib_mad_send_buf *msg;
  2345. int ret;
  2346. lockdep_assert_held(&cm_id_priv->lock);
  2347. if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
  2348. return -EINVAL;
  2349. if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
  2350. trace_icm_dreq_skipped(&cm_id_priv->id);
  2351. return -EINVAL;
  2352. }
  2353. if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
  2354. cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
  2355. ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
  2356. ret = cm_alloc_msg(cm_id_priv, &msg);
  2357. if (ret) {
  2358. cm_enter_timewait(cm_id_priv);
  2359. return ret;
  2360. }
  2361. cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
  2362. private_data, private_data_len);
  2363. msg->timeout_ms = cm_id_priv->timeout_ms;
  2364. msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
  2365. trace_icm_send_dreq(&cm_id_priv->id);
  2366. ret = ib_post_send_mad(msg, NULL);
  2367. if (ret) {
  2368. cm_enter_timewait(cm_id_priv);
  2369. cm_free_msg(msg);
  2370. return ret;
  2371. }
  2372. cm_id_priv->id.state = IB_CM_DREQ_SENT;
  2373. cm_id_priv->msg = msg;
  2374. return 0;
  2375. }
  2376. int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
  2377. u8 private_data_len)
  2378. {
  2379. struct cm_id_private *cm_id_priv =
  2380. container_of(cm_id, struct cm_id_private, id);
  2381. unsigned long flags;
  2382. int ret;
  2383. spin_lock_irqsave(&cm_id_priv->lock, flags);
  2384. ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
  2385. spin_unlock_irqrestore(&cm_id_priv->lock, flags);
  2386. return ret;
  2387. }
  2388. EXPORT_SYMBOL(ib_send_cm_dreq);
  2389. static void cm_format_drep(struct cm_drep_msg *drep_msg,
  2390. struct cm_id_private *cm_id_priv,
  2391. const void *private_data,
  2392. u8 private_data_len)
  2393. {
  2394. cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
  2395. IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
  2396. be32_to_cpu(cm_id_priv->id.local_id));
  2397. IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
  2398. be32_to_cpu(cm_id_priv->id.remote_id));
  2399. if (private_data && private_data_len)
  2400. IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
  2401. private_data_len);
  2402. }
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		trace_icm_send_drep_err(&cm_id_priv->id);
		kfree(private_data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, private_data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		return ret;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	trace_icm_send_drep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	return 0;
}
int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	void *data;
	int ret;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
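/*
 * Answer a DREQ for which no local cm_id could be found: build a DREP
 * directly from the incoming MAD, mirroring its comm IDs back so the
 * remote side can leave timewait, without touching any connection state.
 */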
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));

	trace_icm_issue_drep(
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
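/*
 * Handle a received DREQ: cancel any outstanding send for the connection,
 * re-send a DREP for duplicates arriving in TIMEWAIT, and otherwise move
 * the cm_id to DREQ_RCVD and queue the event for the consumer.
 */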
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		trace_icm_no_priv_err(
			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
		return -EINVAL;
	}

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn !=
	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		trace_icm_dreq_unknown_err(&cm_id_priv->id);
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
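/*
 * Send a REJ appropriate to the current connection state.  Before the REP
 * stage the cm_id drops back to idle; once a REP has been sent the cm_id
 * must go through timewait instead.  Called with cm_id_priv->lock held.
 */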
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	enum ib_cm_state state = cm_id_priv->id.state;
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_reset_to_idle(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_enter_timewait(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	default:
		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
		return -EINVAL;
	}

	trace_icm_send_rej(&cm_id_priv->id, reason);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}

	return 0;
}
int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
		   void *ari, u8 ari_length, const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
				 private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));

	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
		cm_id_priv = cm_find_remote_id(
			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
			remote_id);
	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
		   CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			0);
	else
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		fallthrough;
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		fallthrough;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		fallthrough;
	default:
		trace_icm_rej_unknown_err(&cm_id_priv->id);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
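/*
 * Send an MRA (Message Receipt Acknowledgement) for a received REQ, REP or
 * LAP.  If IB_CM_MRA_FLAG_DELAY is set in service_timeout, only the state
 * and private data are recorded now; the MRA itself is deferred until a
 * duplicate of the acknowledged message arrives.
 */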
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		fallthrough;
	default:
		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		trace_icm_send_mra(cm_id);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
	default:
		return NULL;
	}
}
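/*
 * Handle a received MRA: extend the timeout of the outstanding MAD by the
 * peer's advertised service timeout and record that the message was
 * acknowledged, so the retry machinery does not give up on a slow peer.
 */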
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
	work->cm_event.param.mra_rcvd.service_timeout =
		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		fallthrough;
	default:
		trace_icm_mra_unknown_err(&cm_id_priv->id);
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
					struct sa_path_rec *path)
{
	u32 lid;

	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
					       lap_msg));
		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
					       lap_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
		sa_path_set_dlid(path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
		sa_path_set_slid(path, lid);
	}
}
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
	path->sgid =
		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
	path->flow_label =
		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time =
		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
	cm_format_path_lid_from_lap(lap_msg, path);
}
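/*
 * Handle a received LAP (load alternate path) request: extract the proposed
 * alternate path, resolve address handles for it, and queue the event for
 * the consumer.  Duplicates are answered with the previously recorded MRA.
 */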
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[1]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
			     work->port->port_num, &work->path[0],
			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
					     lap_msg));
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
				 work->mad_recv_wc->recv_buf.grh,
				 &cm_id_priv->av);
	if (ret)
		goto unlock;

	ret = cm_init_av_by_path(param->alternate_path, NULL,
				 &cm_id_priv->alt_av, cm_id_priv);
	if (ret)
		goto unlock;

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status =
		IBA_GET(CM_APR_AR_STATUS, apr_msg);
	work->cm_event.param.apr_rcvd.apr_info =
		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
	work->cm_event.param.apr_rcvd.info_len =
		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
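/*
 * A timewait timer fired: if the cm_id is still in TIMEWAIT for the same
 * remote QPN, move it to IDLE and report IB_CM_TIMEWAIT_EXIT to the consumer.
 */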
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;

	timewait_info = container_of(work, struct cm_timewait_info, work);
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
		be16_to_cpu(param->path->pkey));
	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
		be64_to_cpu(param->service_id));

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
			    param->private_data, param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, param->sgid_attr,
				 &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE) {
		trace_icm_send_sidr_req(&cm_id_priv->id);
		ret = ib_post_send_mad(msg, NULL);
	} else {
		ret = -EINVAL;
	}

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
static void cm_format_sidr_req_event(struct cm_work *work,
				     const struct cm_id_private *rx_cm_id,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
	param->listen_id = listen_id;
	param->service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}
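/*
 * Handle a received SIDR REQ: allocate a new cm_id, record the requester,
 * and hand the event to the listener registered for the service ID.  If no
 * listener exists, reply immediately with IB_SIDR_UNSUPPORTED.
 */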
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	cm_id_priv->tid = sidr_req_msg->hdr.tid;

	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;

	/*
	 * A SIDR ID does not need to be in the xarray since it does not receive
	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
	 * not enter timewait.
	 */

	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);
	/*
	 * A pointer to the listen_cm_id is held in the event, so this deref
	 * must be after the event is delivered above.
	 */
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
		be64_to_cpu(cm_id_priv->id.service_id));
	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
		param->ece.vendor_id & 0xFF);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
		(param->ece.vendor_id >> 8) & 0xFF);

	if (param->info && param->info_length)
		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
			    param->info, param->info_length);
	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		return ret;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
static void cm_format_sidr_rep_event(struct cm_work *work,
				     const struct cm_id_private *cm_id_priv)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
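/*
 * A CM MAD failed or timed out on the wire.  If it is still the active
 * message for the cm_id and the state has not moved on, roll the state
 * machine back and deliver the matching *_ERROR event to the consumer.
 */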
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	trace_icm_mad_send_err(state, wc_status);
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
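/* Workqueue entry point: dispatch a queued CM event to its handler. */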
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		trace_icm_handler_err(work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		trace_icm_establish_err(cm_id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_av tmp_av;
	unsigned long flags;
	int tmp_send_port_not_ready;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		/* Swap address vector */
		tmp_av = cm_id_priv->av;
		cm_id_priv->av = cm_id_priv->alt_av;
		cm_id_priv->alt_av = tmp_av;
		/* Swap port send ready state */
		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
		cm_id_priv->prim_send_port_not_ready =
			cm_id_priv->altr_send_port_not_ready;
		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
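/*
 * MAD-layer receive callback: map the CM attribute ID to a CM event, bump
 * the receive counters, and queue a work item so the message is processed
 * in workqueue context rather than in the receive path.
 */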
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}
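/*
 * Helpers for ib_cm_init_qp_attr(): fill in the QP attributes a consumer
 * needs to move its QP through INIT, RTR and RTS for the current
 * connection state.
 */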
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};
static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}
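/*
 * Device registration callback: allocate per-device and per-port state,
 * register a GSI MAD agent on every CM-capable port and advertise
 * IB_PORT_CM_SUP.  Fails with -EOPNOTSUPP if no port supports the CM.
 */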
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	unsigned int i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	kfree(port);
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}
free:
	kfree(cm_dev);
	return ret;
}
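/*
 * Device removal callback: undo cm_add_one().  Ports are marked as going
 * down first so no new work is queued while the MAD agents are torn down.
 */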
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	unsigned int i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the workqueue here, after going_down has been set.
		 * This ensures that no new work is queued by the recv
		 * handler, so the MAD agent can then be unregistered safely.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}

	kfree(cm_dev);
}
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	return ret;
}
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);