// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {		/* for an LS RQST RCV */
	struct nvmefc_ls_rsp		*lsrsp;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_rcv_list; /* tgtport->ls_rcv_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;
	void				*hosthandle;

	union nvmefc_ls_requests	*rqstbuf;
	union nvmefc_ls_responses	*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {		/* for an LS RQST XMT */
	struct nvmefc_ls_req		ls_req;

	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;

	int				ls_error;
	struct list_head		lsreq_list; /* tgtport->ls_req_list */
	bool				req_queued;
};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};
struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*next_sg;
	struct scatterlist		*data_sg;
	int				data_sg_cnt;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	bool				aborted;
	bool				writedataactive;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		defer_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_rcv_list;
	struct list_head		ls_req_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct list_head		host_list;
	struct ida			assoc_cnt;
	struct nvmet_fc_port_entry	*pe;
	struct kref			ref;
	u32				max_sg_cnt;
};

struct nvmet_fc_port_entry {
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_port		*port;
	u64				node_name;
	u64				port_name;
	struct list_head		pe_list;
};

struct nvmet_fc_defer_fcp_req {
	struct list_head		req_list;
	struct nvmefc_tgt_fcp_req	*fcp_req;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	__le16				sqhd;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct list_head		fod_list;
	struct list_head		pending_cmd_list;
	struct list_head		avail_defer_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
	struct nvmet_fc_fcp_iod		fod[];	/* array of fcp_iods */
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {
	struct nvmet_fc_tgtport		*tgtport;
	void				*hosthandle;
	struct list_head		host_list;
	struct kref			ref;
	u8				invalid;
};

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	atomic_t			terminating;
	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_hostport	*hostport;
	struct nvmet_fc_ls_iod		*rcv_disconn;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
	struct kref			ref;
	struct work_struct		del_work;
};

static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}
/*
 * Association and Connection IDs:
 *
 * The Association ID will have a random number in the upper 6 bytes and
 * zero in the lower 2 bytes.
 *
 * Connection IDs will be the Association ID with the QID or'd into the
 * lower 2 bytes.
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
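
/*
 * Illustrative sketch only (not part of the driver): a worked example of
 * how the helpers above compose and decompose a connection ID. The
 * association_id value is hypothetical.
 *
 *	assoc->association_id == 0x0123456789ab0000
 *
 *	u64 connid = nvmet_fc_makeconnid(assoc, 3);
 *		connid                           == 0x0123456789ab0003
 *		nvmet_fc_getassociationid(connid) == 0x0123456789ab0000
 *		nvmet_fc_getqueueid(connid)       == 3
 *
 * For qid 0 (the admin queue), the connection ID equals the association ID.
 */
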
static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}

/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll noop
 * them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{
	struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&tgtport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);

	nvmet_fc_tgtport_put(tgtport);
}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (!tgtport->ops->ls_req)
		return -EOPNOTSUPP;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);

	lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
				lsreq->rqstlen + lsreq->rsplen,
				DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_puttgtport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&tgtport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&tgtport->lock, flags);

	ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
				   lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&tgtport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
				(lsreq->rqstlen + lsreq->rsplen),
				DMA_BIDIRECTIONAL);
out_puttgtport:
	nvmet_fc_tgtport_put(tgtport);

	return ret;
}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvmet_fc_send_ls_req(tgtport, lsop, done);
}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmet_fc_ls_req_op *lsop =
		container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);

	__nvmet_fc_finish_ls_req(lsop);

	/* fc-nvme target doesn't care about success or failure of cmd */

	kfree(lsop);
}
/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it too can tear down.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
	struct nvmet_fc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	/*
	 * If ls_req is NULL or no hosthandle, it's an older lldd and no
	 * message is normal. Otherwise, send unless the hostport has
	 * already been invalidated by the lldd.
	 */
	if (!tgtport->ops->ls_req || !assoc->hostport ||
	    assoc->hostport->invalid)
		return;

	lsop = kzalloc((sizeof(*lsop) +
			sizeof(*discon_rqst) + sizeof(*discon_acc) +
			tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
	if (!lsop) {
		dev_info(tgtport->dev,
			"{%d:%d} send Disconnect Association failed: ENOMEM\n",
			tgtport->fc_target_port.port_num, assoc->a_id);
		return;
	}

	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
	lsreq = &lsop->ls_req;
	if (tgtport->ops->lsrqst_priv_sz)
		lsreq->private = (void *)&discon_acc[1];
	else
		lsreq->private = NULL;

	lsop->tgtport = tgtport;
	lsop->hosthandle = assoc->hostport->hosthandle;

	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
				assoc->association_id);

	ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
				nvmet_fc_disconnect_assoc_done);
	if (ret) {
		dev_info(tgtport->dev,
			"{%d:%d} XMT Disconnect Association failed: %d\n",
			tgtport->fc_target_port.port_num, assoc->a_id, ret);
		kfree(lsop);
	}
}

/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);

		iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
				       sizeof(union nvmefc_ls_responses),
				       GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						sizeof(*iod->rspbuf),
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_rcv_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
				sizeof(*iod->rspbuf), DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}

	kfree(iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, sizeof(*iod->rspbuf),
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_rcv_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
					struct nvmet_fc_ls_iod, ls_rcv_list);
	if (iod)
		list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}

static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		fod->abort = false;
		fod->aborted = false;
		fod->fcpreq = NULL;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;

	lockdep_assert_held(&queue->qlock);

	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	return fod;
}
static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;

	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, defer_work);

	/* Submit deferred IO for processing */
	nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	struct nvmet_fc_defer_fcp_req *deferfcp;
	unsigned long flags;

	fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);

	fcpreq->nvmet_fc_private = NULL;

	fod->active = false;
	fod->abort = false;
	fod->aborted = false;
	fod->writedataactive = false;
	fod->fcpreq = NULL;

	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);

	/* release the queue lookup reference on the completed IO */
	nvmet_fc_tgt_q_put(queue);

	spin_lock_irqsave(&queue->qlock, flags);
	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
	if (!deferfcp) {
		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
		spin_unlock_irqrestore(&queue->qlock, flags);
		return;
	}

	/* Re-use the fod for the next pending cmd that was deferred */
	list_del(&deferfcp->req_list);

	fcpreq = deferfcp->fcp_req;

	/* deferfcp can be reused for another IO at a later date */
	list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);

	spin_unlock_irqrestore(&queue->qlock, flags);

	/* Save NVME CMD IO in fod */
	memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);

	/* Setup new fcpreq to be processed */
	fcpreq->rspaddr = NULL;
	fcpreq->rsplen = 0;
	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	fod->active = true;

	/* inform LLDD IO is now being processed */
	tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);

	/*
	 * Leave the queue lookup get reference taken when
	 * fod was originally allocated.
	 */

	queue_work(queue->work_q, &fod->defer_work);
}
static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	INIT_LIST_HEAD(&queue->fod_list);
	INIT_LIST_HEAD(&queue->avail_defer_list);
	INIT_LIST_HEAD(&queue->pending_cmd_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}

static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}

static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	/* if not connected, nothing to do */
	if (!disconnect)
		return;

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			/*
			 * only call lldd abort routine if waiting for
			 * writedata. other outstanding ops should finish
			 * on their own.
			 */
			if (fod->writedataactive) {
				fod->aborted = true;
				spin_unlock(&fod->flock);
				tgtport->ops->fcp_abort(
					&tgtport->fc_target_port, fod->fcpreq);
			} else
				spin_unlock(&fod->flock);
		}
	}

	/* Cleanup defer'ed IOs in queue */
	list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
				req_list) {
		list_del(&deferfcp->req_list);
		kfree(deferfcp);
	}

	for (;;) {
		deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
				struct nvmet_fc_defer_fcp_req, req_list);
		if (!deferfcp)
			break;

		list_del(&deferfcp->req_list);
		spin_unlock_irqrestore(&queue->qlock, flags);

		tgtport->ops->defer_rcv(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_abort(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
				deferfcp->fcp_req);

		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);

		kfree(deferfcp);

		spin_lock_irqsave(&queue->qlock, flags);
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}
static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	if (qid > NVMET_NR_QUEUES)
		return NULL;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static void
nvmet_fc_hostport_free(struct kref *ref)
{
	struct nvmet_fc_hostport *hostport =
		container_of(ref, struct nvmet_fc_hostport, ref);
	struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&hostport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	if (tgtport->ops->host_release && hostport->invalid)
		tgtport->ops->host_release(hostport->hosthandle);
	kfree(hostport);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{
	kref_put(&hostport->ref, nvmet_fc_hostport_free);
}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{
	return kref_get_unless_zero(&hostport->ref);
}

static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
{
	/* if LLDD not implemented, leave as NULL */
	if (!hostport || !hostport->hosthandle)
		return;

	nvmet_fc_hostport_put(hostport);
}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_hostport *newhost, *host, *match = NULL;
	unsigned long flags;

	/* if LLDD not implemented, leave as NULL */
	if (!hosthandle)
		return NULL;

	/* take reference for what will be the newly allocated hostport */
	if (!nvmet_fc_tgtport_get(tgtport))
		return ERR_PTR(-EINVAL);

	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
	if (!newhost) {
		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(host, &tgtport->host_list, host_list) {
			if (host->hosthandle == hosthandle && !host->invalid) {
				if (nvmet_fc_hostport_get(host)) {
					match = host;
					break;
				}
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
		/* no allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
		return (match) ? match : ERR_PTR(-ENOMEM);
	}

	newhost->tgtport = tgtport;
	newhost->hosthandle = hosthandle;
	INIT_LIST_HEAD(&newhost->host_list);
	kref_init(&newhost->ref);

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(host, &tgtport->host_list, host_list) {
		if (host->hosthandle == hosthandle && !host->invalid) {
			if (nvmet_fc_hostport_get(host)) {
				match = host;
				break;
			}
		}
	}
	if (match) {
		kfree(newhost);
		newhost = NULL;
		/* releasing allocation - release reference */
		nvmet_fc_tgtport_put(tgtport);
	} else
		list_add_tail(&newhost->host_list, &tgtport->host_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return (match) ? match : newhost;
}
static void
nvmet_fc_delete_assoc(struct work_struct *work)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(work, struct nvmet_fc_tgt_assoc, del_work);

	nvmet_fc_delete_target_assoc(assoc);
	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida;

	assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
	if (IS_ERR(assoc->hostport))
		goto out_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);
	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
	atomic_set(&assoc->terminating, 0);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_put:
	nvmet_fc_tgtport_put(tgtport);
out_ida:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;

	/* Send Disconnect now that all i/o has completed */
	nvmet_fc_xmt_disconnect_assoc(assoc);

	nvmet_fc_free_hostport(assoc->hostport);
	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	oldls = assoc->rcv_disconn;
	spin_unlock_irqrestore(&tgtport->lock, flags);
	/* if pending Rcv Disconnect Association LS, send rsp now */
	if (oldls)
		nvmet_fc_xmt_ls_rsp(tgtport, oldls);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	dev_info(tgtport->dev,
		"{%d:%d} Association freed\n",
		tgtport->fc_target_port.port_num, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i, terminating;

	terminating = atomic_xchg(&assoc->terminating, 1);

	/* if already terminating, do nothing */
	if (terminating)
		return;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	dev_info(tgtport->dev,
		"{%d:%d} Association deleted\n",
		tgtport->fc_target_port.port_num, assoc->a_id);

	nvmet_fc_tgt_a_put(assoc);
}
static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			if (!nvmet_fc_tgt_a_get(assoc))
				ret = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{
	lockdep_assert_held(&nvmet_fc_tgtlock);

	pe->tgtport = tgtport;
	tgtport->pe = pe;

	pe->port = port;
	port->priv = pe;

	pe->node_name = tgtport->fc_target_port.node_name;
	pe->port_name = tgtport->fc_target_port.port_name;
	INIT_LIST_HEAD(&pe->pe_list);

	list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (pe->tgtport)
		pe->tgtport->pe = NULL;
	list_del(&pe->pe_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	pe = tgtport->pe;
	if (pe)
		pe->tgtport = NULL;
	tgtport->pe = NULL;
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's wwn's. (the targetport existed,
 * nvmet configured, the lldd unregistered the tgtport, and is now
 * reregistering the same targetport). If so, set the nvmet port
 * port entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_port_entry *pe;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
		if (tgtport->fc_target_port.node_name == pe->node_name &&
		    tgtport->fc_target_port.port_name == pe->port_name) {
			WARN_ON(pe->tgtport);
			tgtport->pe = pe;
			pe->tgtport = tgtport;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}
/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate an nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the target
 *             port pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->fcp_abort ||
	    !template->fcp_req_release || !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	if (template->target_priv_sz)
		newrec->fc_target_port.private = &newrec[1];
	else
		newrec->fc_target_port.private = NULL;
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	INIT_LIST_HEAD(&newrec->host_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);
	newrec->max_sg_cnt = template->max_sgl_segments;

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	nvmet_fc_portentry_rebind_tgt(newrec);

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
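
/*
 * Illustrative sketch only (not part of this driver): roughly how a
 * hypothetical LLDD might call nvmet_fc_register_targetport(). The
 * example_* names, values, and callback implementations are assumptions
 * for illustration; the mandatory template entrypoints are the ones
 * validated above (xmt_ls_rsp, fcp_op, fcp_abort, fcp_req_release,
 * targetport_delete, and the queue/sgl/dma limits).
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.fcp_abort		= example_fcp_abort,
 *		.fcp_req_release	= example_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_priv_sz		= sizeof(struct example_tgt_priv),
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= example_wwnn,
 *		.port_name	= example_wwpn,
 *		.port_id	= example_fabric_port_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *					   &pdev->dev, &targetport);
 *	On success, targetport is the handle passed back on LS/FCP receive
 *	calls; on failure it is set to NULL and err is a negative errno.
 */
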
static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		if (!schedule_work(&assoc->del_work))
			/* already deleting - release local reference */
			nvmet_fc_tgt_a_put(assoc);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}
/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * on the targetport are forgotten (set to NULL). The LLDD will
 * typically call this when a login with a remote host port has been
 * lost, thus LS's for the remote host port are no longer possible.
 *
 * If an LS request is outstanding to the targetport/hosthandle (or
 * issued concurrently with the call to invalidate the host), the
 * LLDD is responsible for terminating/aborting the LS and completing
 * the LS request. It is recommended that these terminations/aborts
 * occur after calling to invalidate the host handle to avoid additional
 * retries by the nvmet-fc transport. The nvmet-fc transport may
 * continue to reference the host handle while it cleans up outstanding
 * NVME associations. The nvmet-fc transport will call the
 * ops->host_release() callback to notify the LLDD that all references
 * are complete and the related host handle can be recovered.
 * Note: if there are no references, the callback may be called before
 * the invalidate host call returns.
 *
 * @target_port: pointer to the (registered) target port that a prior
 *              LS was received on and which supplied the transport the
 *              hosthandle.
 * @hosthandle: the handle (pointer) that represents the host port
 *              that no longer has connectivity and that LS's should
 *              no longer be directed to.
 */
  1254. void
  1255. nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
  1256. void *hosthandle)
  1257. {
  1258. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  1259. struct nvmet_fc_tgt_assoc *assoc, *next;
  1260. unsigned long flags;
  1261. bool noassoc = true;
  1262. spin_lock_irqsave(&tgtport->lock, flags);
  1263. list_for_each_entry_safe(assoc, next,
  1264. &tgtport->assoc_list, a_list) {
  1265. if (!assoc->hostport ||
  1266. assoc->hostport->hosthandle != hosthandle)
  1267. continue;
  1268. if (!nvmet_fc_tgt_a_get(assoc))
  1269. continue;
  1270. assoc->hostport->invalid = 1;
  1271. noassoc = false;
  1272. if (!schedule_work(&assoc->del_work))
  1273. /* already deleting - release local reference */
  1274. nvmet_fc_tgt_a_put(assoc);
  1275. }
  1276. spin_unlock_irqrestore(&tgtport->lock, flags);
  1277. /* if there's nothing to wait for - call the callback */
  1278. if (noassoc && tgtport->ops->host_release)
  1279. tgtport->ops->host_release(hosthandle);
  1280. }
  1281. EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
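/*
 * Illustrative sketch only (not part of the transport): how a hypothetical
 * LLDD might use nvmet_fc_invalidate_host() when it loses the login with a
 * remote host port.  "example_remote_port", its fields, and
 * example_terminate_ls() are made-up LLDD constructs; only
 * nvmet_fc_invalidate_host() is real transport API.
 */
#if 0
static void example_lldd_rport_logout(struct example_remote_port *rport)
{
	/*
	 * Tell nvmet-fc to forget the hosthandle.  The transport may keep
	 * referencing it until ops->host_release() is invoked, which can
	 * happen before this call returns if nothing is outstanding.
	 */
	nvmet_fc_invalidate_host(rport->tgtport, rport /* hosthandle */);

	/* then abort/complete any LS exchanges still pending to that host */
	example_terminate_ls(rport);
}
#endif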
  1282. /*
  1283. * nvmet layer has called to terminate an association
  1284. */
  1285. static void
  1286. nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
  1287. {
  1288. struct nvmet_fc_tgtport *tgtport, *next;
  1289. struct nvmet_fc_tgt_assoc *assoc;
  1290. struct nvmet_fc_tgt_queue *queue;
  1291. unsigned long flags;
  1292. bool found_ctrl = false;
  1293. /* this is a bit ugly, but don't want to make locks layered */
  1294. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1295. list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
  1296. tgt_list) {
  1297. if (!nvmet_fc_tgtport_get(tgtport))
  1298. continue;
  1299. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1300. spin_lock_irqsave(&tgtport->lock, flags);
  1301. list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
  1302. queue = assoc->queues[0];
  1303. if (queue && queue->nvme_sq.ctrl == ctrl) {
  1304. if (nvmet_fc_tgt_a_get(assoc))
  1305. found_ctrl = true;
  1306. break;
  1307. }
  1308. }
  1309. spin_unlock_irqrestore(&tgtport->lock, flags);
  1310. nvmet_fc_tgtport_put(tgtport);
  1311. if (found_ctrl) {
  1312. if (!schedule_work(&assoc->del_work))
  1313. /* already deleting - release local reference */
  1314. nvmet_fc_tgt_a_put(assoc);
  1315. return;
  1316. }
  1317. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  1318. }
  1319. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  1320. }
/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                                  LLDD to deregister/remove a previously
 *                                  registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * (A hypothetical LLDD-side calling sketch follows the function below.)
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
  1332. int
  1333. nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
  1334. {
  1335. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  1336. nvmet_fc_portentry_unbind_tgt(tgtport);
  1337. /* terminate any outstanding associations */
  1338. __nvmet_fc_free_assocs(tgtport);
  1339. /*
  1340. * should terminate LS's as well. However, LS's will be generated
  1341. * at the tail end of association termination, so they likely don't
  1342. * exist yet. And even if they did, it's worthwhile to just let
  1343. * them finish and targetport ref counting will clean things up.
  1344. */
  1345. nvmet_fc_tgtport_put(tgtport);
  1346. return 0;
  1347. }
  1348. EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
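/*
 * Illustrative sketch only: a hypothetical LLDD remove path.  The LLDD must
 * keep its port resources until its ->targetport_delete() callback has run,
 * since the transport may still be tearing down associations after this
 * call returns.  "example_fc_port" and example_wait_for_targetport_delete()
 * are made-up LLDD constructs.
 */
#if 0
static void example_lldd_remove_port(struct example_fc_port *eport)
{
	/* detach from nvmet-fc; outstanding associations are torn down */
	nvmet_fc_unregister_targetport(eport->targetport);

	/*
	 * The transport drops its final reference asynchronously and then
	 * calls ->targetport_delete(); only free LLDD port resources after
	 * that callback has run.
	 */
	example_wait_for_targetport_delete(eport);
}
#endif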
  1349. /* ********************** FC-NVME LS RCV Handling ************************* */
  1350. static void
  1351. nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
  1352. struct nvmet_fc_ls_iod *iod)
  1353. {
  1354. struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
  1355. struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
  1356. struct nvmet_fc_tgt_queue *queue;
  1357. int ret = 0;
  1358. memset(acc, 0, sizeof(*acc));
/*
 * FC-NVME spec changes: initiators send differing lengths because the
 * padding size for the Create Association Cmd descriptor was originally
 * specified incorrectly.
 * Accept anything of "minimum" length. Assume the format per the 1.15
 * spec (with HOSTID reduced to 16 bytes) and ignore how long the
 * trailing pad is.
 */
  1367. if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
  1368. ret = VERR_CR_ASSOC_LEN;
  1369. else if (be32_to_cpu(rqst->desc_list_len) <
  1370. FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
  1371. ret = VERR_CR_ASSOC_RQST_LEN;
  1372. else if (rqst->assoc_cmd.desc_tag !=
  1373. cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
  1374. ret = VERR_CR_ASSOC_CMD;
  1375. else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
  1376. FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
  1377. ret = VERR_CR_ASSOC_CMD_LEN;
  1378. else if (!rqst->assoc_cmd.ersp_ratio ||
  1379. (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
  1380. be16_to_cpu(rqst->assoc_cmd.sqsize)))
  1381. ret = VERR_ERSP_RATIO;
  1382. else {
  1383. /* new association w/ admin queue */
  1384. iod->assoc = nvmet_fc_alloc_target_assoc(
  1385. tgtport, iod->hosthandle);
  1386. if (!iod->assoc)
  1387. ret = VERR_ASSOC_ALLOC_FAIL;
  1388. else {
  1389. queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
  1390. be16_to_cpu(rqst->assoc_cmd.sqsize));
  1391. if (!queue)
  1392. ret = VERR_QUEUE_ALLOC_FAIL;
  1393. }
  1394. }
  1395. if (ret) {
  1396. dev_err(tgtport->dev,
  1397. "Create Association LS failed: %s\n",
  1398. validation_errors[ret]);
  1399. iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
  1400. sizeof(*acc), rqst->w0.ls_cmd,
  1401. FCNVME_RJT_RC_LOGIC,
  1402. FCNVME_RJT_EXP_NONE, 0);
  1403. return;
  1404. }
  1405. queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
  1406. atomic_set(&queue->connected, 1);
  1407. queue->sqhd = 0; /* best place to init value */
  1408. dev_info(tgtport->dev,
  1409. "{%d:%d} Association created\n",
  1410. tgtport->fc_target_port.port_num, iod->assoc->a_id);
  1411. /* format a response */
  1412. iod->lsrsp->rsplen = sizeof(*acc);
  1413. nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
  1414. fcnvme_lsdesc_len(
  1415. sizeof(struct fcnvme_ls_cr_assoc_acc)),
  1416. FCNVME_LS_CREATE_ASSOCIATION);
  1417. acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
  1418. acc->associd.desc_len =
  1419. fcnvme_lsdesc_len(
  1420. sizeof(struct fcnvme_lsdesc_assoc_id));
  1421. acc->associd.association_id =
  1422. cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
  1423. acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
  1424. acc->connectid.desc_len =
  1425. fcnvme_lsdesc_len(
  1426. sizeof(struct fcnvme_lsdesc_conn_id));
  1427. acc->connectid.connection_id = acc->associd.association_id;
  1428. }
  1429. static void
  1430. nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
  1431. struct nvmet_fc_ls_iod *iod)
  1432. {
  1433. struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
  1434. struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
  1435. struct nvmet_fc_tgt_queue *queue;
  1436. int ret = 0;
  1437. memset(acc, 0, sizeof(*acc));
  1438. if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
  1439. ret = VERR_CR_CONN_LEN;
  1440. else if (rqst->desc_list_len !=
  1441. fcnvme_lsdesc_len(
  1442. sizeof(struct fcnvme_ls_cr_conn_rqst)))
  1443. ret = VERR_CR_CONN_RQST_LEN;
  1444. else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
  1445. ret = VERR_ASSOC_ID;
  1446. else if (rqst->associd.desc_len !=
  1447. fcnvme_lsdesc_len(
  1448. sizeof(struct fcnvme_lsdesc_assoc_id)))
  1449. ret = VERR_ASSOC_ID_LEN;
  1450. else if (rqst->connect_cmd.desc_tag !=
  1451. cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
  1452. ret = VERR_CR_CONN_CMD;
  1453. else if (rqst->connect_cmd.desc_len !=
  1454. fcnvme_lsdesc_len(
  1455. sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
  1456. ret = VERR_CR_CONN_CMD_LEN;
  1457. else if (!rqst->connect_cmd.ersp_ratio ||
  1458. (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
  1459. be16_to_cpu(rqst->connect_cmd.sqsize)))
  1460. ret = VERR_ERSP_RATIO;
  1461. else {
  1462. /* new io queue */
  1463. iod->assoc = nvmet_fc_find_target_assoc(tgtport,
  1464. be64_to_cpu(rqst->associd.association_id));
  1465. if (!iod->assoc)
  1466. ret = VERR_NO_ASSOC;
  1467. else {
  1468. queue = nvmet_fc_alloc_target_queue(iod->assoc,
  1469. be16_to_cpu(rqst->connect_cmd.qid),
  1470. be16_to_cpu(rqst->connect_cmd.sqsize));
  1471. if (!queue)
  1472. ret = VERR_QUEUE_ALLOC_FAIL;
  1473. /* release get taken in nvmet_fc_find_target_assoc */
  1474. nvmet_fc_tgt_a_put(iod->assoc);
  1475. }
  1476. }
  1477. if (ret) {
  1478. dev_err(tgtport->dev,
  1479. "Create Connection LS failed: %s\n",
  1480. validation_errors[ret]);
  1481. iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
  1482. sizeof(*acc), rqst->w0.ls_cmd,
  1483. (ret == VERR_NO_ASSOC) ?
  1484. FCNVME_RJT_RC_INV_ASSOC :
  1485. FCNVME_RJT_RC_LOGIC,
  1486. FCNVME_RJT_EXP_NONE, 0);
  1487. return;
  1488. }
  1489. queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
  1490. atomic_set(&queue->connected, 1);
  1491. queue->sqhd = 0; /* best place to init value */
  1492. /* format a response */
  1493. iod->lsrsp->rsplen = sizeof(*acc);
  1494. nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
  1495. fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
  1496. FCNVME_LS_CREATE_CONNECTION);
  1497. acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
  1498. acc->connectid.desc_len =
  1499. fcnvme_lsdesc_len(
  1500. sizeof(struct fcnvme_lsdesc_conn_id));
  1501. acc->connectid.connection_id =
  1502. cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
  1503. be16_to_cpu(rqst->connect_cmd.qid)));
  1504. }
  1505. /*
* Returns true if the LS response is to be transmitted
  1507. * Returns false if the LS response is to be delayed
  1508. */
  1509. static int
  1510. nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
  1511. struct nvmet_fc_ls_iod *iod)
  1512. {
  1513. struct fcnvme_ls_disconnect_assoc_rqst *rqst =
  1514. &iod->rqstbuf->rq_dis_assoc;
  1515. struct fcnvme_ls_disconnect_assoc_acc *acc =
  1516. &iod->rspbuf->rsp_dis_assoc;
  1517. struct nvmet_fc_tgt_assoc *assoc = NULL;
  1518. struct nvmet_fc_ls_iod *oldls = NULL;
  1519. unsigned long flags;
  1520. int ret = 0;
  1521. memset(acc, 0, sizeof(*acc));
  1522. ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
  1523. if (!ret) {
  1524. /* match an active association - takes an assoc ref if !NULL */
  1525. assoc = nvmet_fc_find_target_assoc(tgtport,
  1526. be64_to_cpu(rqst->associd.association_id));
  1527. iod->assoc = assoc;
  1528. if (!assoc)
  1529. ret = VERR_NO_ASSOC;
  1530. }
  1531. if (ret || !assoc) {
  1532. dev_err(tgtport->dev,
  1533. "Disconnect LS failed: %s\n",
  1534. validation_errors[ret]);
  1535. iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
  1536. sizeof(*acc), rqst->w0.ls_cmd,
  1537. (ret == VERR_NO_ASSOC) ?
  1538. FCNVME_RJT_RC_INV_ASSOC :
  1539. FCNVME_RJT_RC_LOGIC,
  1540. FCNVME_RJT_EXP_NONE, 0);
  1541. return true;
  1542. }
  1543. /* format a response */
  1544. iod->lsrsp->rsplen = sizeof(*acc);
  1545. nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
  1546. fcnvme_lsdesc_len(
  1547. sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
  1548. FCNVME_LS_DISCONNECT_ASSOC);
  1549. /* release get taken in nvmet_fc_find_target_assoc */
  1550. nvmet_fc_tgt_a_put(assoc);
  1551. /*
* The rules for LS responses say the response cannot
  1553. * go back until ABTS's have been sent for all outstanding
  1554. * I/O and a Disconnect Association LS has been sent.
  1555. * So... save off the Disconnect LS to send the response
  1556. * later. If there was a prior LS already saved, replace
  1557. * it with the newer one and send a can't perform reject
  1558. * on the older one.
  1559. */
  1560. spin_lock_irqsave(&tgtport->lock, flags);
  1561. oldls = assoc->rcv_disconn;
  1562. assoc->rcv_disconn = iod;
  1563. spin_unlock_irqrestore(&tgtport->lock, flags);
  1564. nvmet_fc_delete_target_assoc(assoc);
  1565. if (oldls) {
  1566. dev_info(tgtport->dev,
  1567. "{%d:%d} Multiple Disconnect Association LS's "
  1568. "received\n",
  1569. tgtport->fc_target_port.port_num, assoc->a_id);
  1570. /* overwrite good response with bogus failure */
  1571. oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
  1572. sizeof(*iod->rspbuf),
  1573. /* ok to use rqst, LS is same */
  1574. rqst->w0.ls_cmd,
  1575. FCNVME_RJT_RC_UNAB,
  1576. FCNVME_RJT_EXP_NONE, 0);
  1577. nvmet_fc_xmt_ls_rsp(tgtport, oldls);
  1578. }
  1579. return false;
  1580. }
  1581. /* *********************** NVME Ctrl Routines **************************** */
  1582. static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
  1583. static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
  1584. static void
  1585. nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
  1586. {
  1587. struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
  1588. struct nvmet_fc_tgtport *tgtport = iod->tgtport;
  1589. fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
  1590. sizeof(*iod->rspbuf), DMA_TO_DEVICE);
  1591. nvmet_fc_free_ls_iod(tgtport, iod);
  1592. nvmet_fc_tgtport_put(tgtport);
  1593. }
  1594. static void
  1595. nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
  1596. struct nvmet_fc_ls_iod *iod)
  1597. {
  1598. int ret;
  1599. fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
  1600. sizeof(*iod->rspbuf), DMA_TO_DEVICE);
  1601. ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
  1602. if (ret)
  1603. nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
  1604. }
  1605. /*
  1606. * Actual processing routine for received FC-NVME LS Requests from the LLD
  1607. */
  1608. static void
  1609. nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
  1610. struct nvmet_fc_ls_iod *iod)
  1611. {
  1612. struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
  1613. bool sendrsp = true;
  1614. iod->lsrsp->nvme_fc_private = iod;
  1615. iod->lsrsp->rspbuf = iod->rspbuf;
  1616. iod->lsrsp->rspdma = iod->rspdma;
  1617. iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
  1618. /* Be preventative. handlers will later set to valid length */
  1619. iod->lsrsp->rsplen = 0;
  1620. iod->assoc = NULL;
  1621. /*
  1622. * handlers:
  1623. * parse request input, execute the request, and format the
  1624. * LS response
  1625. */
  1626. switch (w0->ls_cmd) {
  1627. case FCNVME_LS_CREATE_ASSOCIATION:
  1628. /* Creates Association and initial Admin Queue/Connection */
  1629. nvmet_fc_ls_create_association(tgtport, iod);
  1630. break;
  1631. case FCNVME_LS_CREATE_CONNECTION:
  1632. /* Creates an IO Queue/Connection */
  1633. nvmet_fc_ls_create_connection(tgtport, iod);
  1634. break;
  1635. case FCNVME_LS_DISCONNECT_ASSOC:
  1636. /* Terminate a Queue/Connection or the Association */
  1637. sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
  1638. break;
  1639. default:
  1640. iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
  1641. sizeof(*iod->rspbuf), w0->ls_cmd,
  1642. FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
  1643. }
  1644. if (sendrsp)
  1645. nvmet_fc_xmt_ls_rsp(tgtport, iod);
  1646. }
/*
 * Work-queue wrapper: runs nvmet_fc_handle_ls_rqst() in process context for
 * an LS request queued by nvmet_fc_rcv_ls_req().
 */
  1650. static void
  1651. nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
  1652. {
  1653. struct nvmet_fc_ls_iod *iod =
  1654. container_of(work, struct nvmet_fc_ls_iod, work);
  1655. struct nvmet_fc_tgtport *tgtport = iod->tgtport;
  1656. nvmet_fc_handle_ls_rqst(tgtport, iod);
  1657. }
/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * (A hypothetical LLDD-side calling sketch follows the function below.)
 *
 * @target_port: pointer to the (registered) target port the LS was
 *               received on.
 * @hosthandle:  LLDD-supplied handle (pointer) identifying the remote host
 *               port the LS was received from; it is recorded with any
 *               association created by the LS and is passed back to the
 *               LLDD (see nvmet_fc_invalidate_host()).
 * @lsrsp: pointer to a lsrsp structure to be used to reference
 *         the exchange corresponding to the LS.
 * @lsreqbuf: pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
  1675. int
  1676. nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
  1677. void *hosthandle,
  1678. struct nvmefc_ls_rsp *lsrsp,
  1679. void *lsreqbuf, u32 lsreqbuf_len)
  1680. {
  1681. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  1682. struct nvmet_fc_ls_iod *iod;
  1683. struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
  1684. if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
  1685. dev_info(tgtport->dev,
  1686. "RCV %s LS failed: payload too large (%d)\n",
  1687. (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
  1688. nvmefc_ls_names[w0->ls_cmd] : "",
  1689. lsreqbuf_len);
  1690. return -E2BIG;
  1691. }
  1692. if (!nvmet_fc_tgtport_get(tgtport)) {
  1693. dev_info(tgtport->dev,
  1694. "RCV %s LS failed: target deleting\n",
  1695. (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
  1696. nvmefc_ls_names[w0->ls_cmd] : "");
  1697. return -ESHUTDOWN;
  1698. }
  1699. iod = nvmet_fc_alloc_ls_iod(tgtport);
  1700. if (!iod) {
  1701. dev_info(tgtport->dev,
  1702. "RCV %s LS failed: context allocation failed\n",
  1703. (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
  1704. nvmefc_ls_names[w0->ls_cmd] : "");
  1705. nvmet_fc_tgtport_put(tgtport);
  1706. return -ENOENT;
  1707. }
  1708. iod->lsrsp = lsrsp;
  1709. iod->fcpreq = NULL;
  1710. memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
  1711. iod->rqstdatalen = lsreqbuf_len;
  1712. iod->hosthandle = hosthandle;
  1713. schedule_work(&iod->work);
  1714. return 0;
  1715. }
  1716. EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
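/*
 * Illustrative sketch only: a hypothetical LLDD handing a received NVME LS
 * to nvmet-fc.  "example_ls_exchange" (with an embedded struct nvmefc_ls_rsp)
 * and the example_* helpers are made-up LLDD constructs; per the kernel-doc
 * above, the LS payload buffer may be reused as soon as this call returns.
 */
#if 0
static void example_lldd_ls_rcv(struct example_fc_port *eport,
			struct example_ls_exchange *xchg,
			void *lsbuf, u32 lslen)
{
	int ret;

	ret = nvmet_fc_rcv_ls_req(eport->targetport,
				  xchg->rport /* hosthandle */,
				  &xchg->ls_rsp, lsbuf, lslen);
	if (ret)
		/* transport did not take the LS - terminate the exchange */
		example_abort_exchange(xchg);
}
#endif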
  1717. /*
  1718. * **********************
  1719. * Start of FCP handling
  1720. * **********************
  1721. */
  1722. static int
  1723. nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
  1724. {
  1725. struct scatterlist *sg;
  1726. unsigned int nent;
  1727. sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
  1728. if (!sg)
  1729. goto out;
  1730. fod->data_sg = sg;
  1731. fod->data_sg_cnt = nent;
  1732. fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
  1733. ((fod->io_dir == NVMET_FCP_WRITE) ?
  1734. DMA_FROM_DEVICE : DMA_TO_DEVICE));
  1735. /* note: write from initiator perspective */
  1736. fod->next_sg = fod->data_sg;
  1737. return 0;
  1738. out:
  1739. return NVME_SC_INTERNAL;
  1740. }
  1741. static void
  1742. nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
  1743. {
  1744. if (!fod->data_sg || !fod->data_sg_cnt)
  1745. return;
  1746. fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
  1747. ((fod->io_dir == NVMET_FCP_WRITE) ?
  1748. DMA_FROM_DEVICE : DMA_TO_DEVICE));
  1749. sgl_free(fod->data_sg);
  1750. fod->data_sg = NULL;
  1751. fod->data_sg_cnt = 0;
  1752. }
  1753. static bool
  1754. queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
  1755. {
  1756. u32 sqtail, used;
  1757. /* egad, this is ugly. And sqtail is just a best guess */
  1758. sqtail = atomic_read(&q->sqtail) % q->sqsize;
  1759. used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
  1760. return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
  1761. }
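/*
 * Worked example of the check above (illustrative numbers): with sqsize 32,
 * sqhd 5 and a guessed sqtail of 3, used = 3 + 32 - 5 = 30 entries, and
 * 30 * 10 = 300 >= (32 - 1) * 9 = 279, so the queue is treated as 90+% full
 * and nvmet_fc_prep_fcp_rsp() will force a full ERSP.
 */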
  1762. /*
  1763. * Prep RSP payload.
  1764. * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
  1765. */
  1766. static void
  1767. nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
  1768. struct nvmet_fc_fcp_iod *fod)
  1769. {
  1770. struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
  1771. struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
  1772. struct nvme_completion *cqe = &ersp->cqe;
  1773. u32 *cqewd = (u32 *)cqe;
  1774. bool send_ersp = false;
  1775. u32 rsn, rspcnt, xfr_length;
  1776. if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
  1777. xfr_length = fod->req.transfer_len;
  1778. else
  1779. xfr_length = fod->offset;
  1780. /*
  1781. * check to see if we can send a 0's rsp.
  1782. * Note: to send a 0's response, the NVME-FC host transport will
  1783. * recreate the CQE. The host transport knows: sq id, SQHD (last
  1784. * seen in an ersp), and command_id. Thus it will create a
  1785. * zero-filled CQE with those known fields filled in. Transport
  1786. * must send an ersp for any condition where the cqe won't match
  1787. * this.
  1788. *
  1789. * Here are the FC-NVME mandated cases where we must send an ersp:
  1790. * every N responses, where N=ersp_ratio
  1791. * force fabric commands to send ersp's (not in FC-NVME but good
  1792. * practice)
  1793. * normal cmds: any time status is non-zero, or status is zero
  1794. * but words 0 or 1 are non-zero.
  1795. * the SQ is 90% or more full
  1796. * the cmd is a fused command
  1797. * transferred data length not equal to cmd iu length
  1798. */
  1799. rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
  1800. if (!(rspcnt % fod->queue->ersp_ratio) ||
  1801. nvme_is_fabrics((struct nvme_command *) sqe) ||
  1802. xfr_length != fod->req.transfer_len ||
  1803. (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
  1804. (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
  1805. queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
  1806. send_ersp = true;
  1807. /* re-set the fields */
  1808. fod->fcpreq->rspaddr = ersp;
  1809. fod->fcpreq->rspdma = fod->rspdma;
  1810. if (!send_ersp) {
  1811. memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
  1812. fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
  1813. } else {
  1814. ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
  1815. rsn = atomic_inc_return(&fod->queue->rsn);
  1816. ersp->rsn = cpu_to_be32(rsn);
  1817. ersp->xfrd_len = cpu_to_be32(xfr_length);
  1818. fod->fcpreq->rsplen = sizeof(*ersp);
  1819. }
  1820. fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
  1821. sizeof(fod->rspiubuf), DMA_TO_DEVICE);
  1822. }
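/*
 * Illustrative example of the ERSP decision above: with ersp_ratio = 8, at
 * least every 8th completion on the queue (zrspcnt a multiple of 8) is sent
 * as a full ERSP; other completions with clean CQE words, a full data
 * transfer, a non-fabrics/non-fused command and a not-nearly-full SQ may be
 * sent as the short zero-filled response (NVME_FC_SIZEOF_ZEROS_RSP) instead.
 */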
  1823. static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
  1824. static void
  1825. nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
  1826. struct nvmet_fc_fcp_iod *fod)
  1827. {
  1828. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1829. /* data no longer needed */
  1830. nvmet_fc_free_tgt_pgs(fod);
  1831. /*
  1832. * if an ABTS was received or we issued the fcp_abort early
  1833. * don't call abort routine again.
  1834. */
  1835. /* no need to take lock - lock was taken earlier to get here */
  1836. if (!fod->aborted)
  1837. tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
  1838. nvmet_fc_free_fcp_iod(fod->queue, fod);
  1839. }
  1840. static void
  1841. nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
  1842. struct nvmet_fc_fcp_iod *fod)
  1843. {
  1844. int ret;
  1845. fod->fcpreq->op = NVMET_FCOP_RSP;
  1846. fod->fcpreq->timeout = 0;
  1847. nvmet_fc_prep_fcp_rsp(tgtport, fod);
  1848. ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
  1849. if (ret)
  1850. nvmet_fc_abort_op(tgtport, fod);
  1851. }
  1852. static void
  1853. nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
  1854. struct nvmet_fc_fcp_iod *fod, u8 op)
  1855. {
  1856. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1857. struct scatterlist *sg = fod->next_sg;
  1858. unsigned long flags;
  1859. u32 remaininglen = fod->req.transfer_len - fod->offset;
  1860. u32 tlen = 0;
  1861. int ret;
  1862. fcpreq->op = op;
  1863. fcpreq->offset = fod->offset;
  1864. fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
  1865. /*
  1866. * for next sequence:
  1867. * break at a sg element boundary
  1868. * attempt to keep sequence length capped at
  1869. * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
  1870. * be longer if a single sg element is larger
  1871. * than that amount. This is done to avoid creating
  1872. * a new sg list to use for the tgtport api.
  1873. */
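/*
 * Illustrative example of the chunking below: with 4K scatterlist elements
 * and plenty of data remaining, the loop stops once adding another element
 * would reach NVMET_FC_MAX_SEQ_LENGTH - e.g. with a 256K cap that is 63
 * elements (252K) per sequence, assuming the LLDD's max_sg_cnt permits it.
 * A single element larger than the cap is instead sent by the fallback
 * below as one sequence, limited only by the remaining transfer length.
 */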
  1874. fcpreq->sg = sg;
  1875. fcpreq->sg_cnt = 0;
  1876. while (tlen < remaininglen &&
  1877. fcpreq->sg_cnt < tgtport->max_sg_cnt &&
  1878. tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
  1879. fcpreq->sg_cnt++;
  1880. tlen += sg_dma_len(sg);
  1881. sg = sg_next(sg);
  1882. }
  1883. if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
  1884. fcpreq->sg_cnt++;
  1885. tlen += min_t(u32, sg_dma_len(sg), remaininglen);
  1886. sg = sg_next(sg);
  1887. }
  1888. if (tlen < remaininglen)
  1889. fod->next_sg = sg;
  1890. else
  1891. fod->next_sg = NULL;
  1892. fcpreq->transfer_length = tlen;
  1893. fcpreq->transferred_length = 0;
  1894. fcpreq->fcp_error = 0;
  1895. fcpreq->rsplen = 0;
  1896. /*
  1897. * If the last READDATA request: check if LLDD supports
  1898. * combined xfr with response.
  1899. */
  1900. if ((op == NVMET_FCOP_READDATA) &&
  1901. ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
  1902. (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
  1903. fcpreq->op = NVMET_FCOP_READDATA_RSP;
  1904. nvmet_fc_prep_fcp_rsp(tgtport, fod);
  1905. }
  1906. ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
  1907. if (ret) {
  1908. /*
  1909. * should be ok to set w/o lock as its in the thread of
  1910. * execution (not an async timer routine) and doesn't
  1911. * contend with any clearing action
  1912. */
  1913. fod->abort = true;
  1914. if (op == NVMET_FCOP_WRITEDATA) {
  1915. spin_lock_irqsave(&fod->flock, flags);
  1916. fod->writedataactive = false;
  1917. spin_unlock_irqrestore(&fod->flock, flags);
  1918. nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
  1919. } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
  1920. fcpreq->fcp_error = ret;
  1921. fcpreq->transferred_length = 0;
  1922. nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
  1923. }
  1924. }
  1925. }
  1926. static inline bool
  1927. __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
  1928. {
  1929. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1930. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  1931. /* if in the middle of an io and we need to tear down */
  1932. if (abort) {
  1933. if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
  1934. nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
  1935. return true;
  1936. }
  1937. nvmet_fc_abort_op(tgtport, fod);
  1938. return true;
  1939. }
  1940. return false;
  1941. }
  1942. /*
  1943. * actual done handler for FCP operations when completed by the lldd
  1944. */
  1945. static void
  1946. nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
  1947. {
  1948. struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
  1949. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  1950. unsigned long flags;
  1951. bool abort;
  1952. spin_lock_irqsave(&fod->flock, flags);
  1953. abort = fod->abort;
  1954. fod->writedataactive = false;
  1955. spin_unlock_irqrestore(&fod->flock, flags);
  1956. switch (fcpreq->op) {
  1957. case NVMET_FCOP_WRITEDATA:
  1958. if (__nvmet_fc_fod_op_abort(fod, abort))
  1959. return;
  1960. if (fcpreq->fcp_error ||
  1961. fcpreq->transferred_length != fcpreq->transfer_length) {
  1962. spin_lock_irqsave(&fod->flock, flags);
  1963. fod->abort = true;
  1964. spin_unlock_irqrestore(&fod->flock, flags);
  1965. nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
  1966. return;
  1967. }
  1968. fod->offset += fcpreq->transferred_length;
  1969. if (fod->offset != fod->req.transfer_len) {
  1970. spin_lock_irqsave(&fod->flock, flags);
  1971. fod->writedataactive = true;
  1972. spin_unlock_irqrestore(&fod->flock, flags);
  1973. /* transfer the next chunk */
  1974. nvmet_fc_transfer_fcp_data(tgtport, fod,
  1975. NVMET_FCOP_WRITEDATA);
  1976. return;
  1977. }
  1978. /* data transfer complete, resume with nvmet layer */
  1979. fod->req.execute(&fod->req);
  1980. break;
  1981. case NVMET_FCOP_READDATA:
  1982. case NVMET_FCOP_READDATA_RSP:
  1983. if (__nvmet_fc_fod_op_abort(fod, abort))
  1984. return;
  1985. if (fcpreq->fcp_error ||
  1986. fcpreq->transferred_length != fcpreq->transfer_length) {
  1987. nvmet_fc_abort_op(tgtport, fod);
  1988. return;
  1989. }
  1990. /* success */
  1991. if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
  1992. /* data no longer needed */
  1993. nvmet_fc_free_tgt_pgs(fod);
  1994. nvmet_fc_free_fcp_iod(fod->queue, fod);
  1995. return;
  1996. }
  1997. fod->offset += fcpreq->transferred_length;
  1998. if (fod->offset != fod->req.transfer_len) {
  1999. /* transfer the next chunk */
  2000. nvmet_fc_transfer_fcp_data(tgtport, fod,
  2001. NVMET_FCOP_READDATA);
  2002. return;
  2003. }
  2004. /* data transfer complete, send response */
  2005. /* data no longer needed */
  2006. nvmet_fc_free_tgt_pgs(fod);
  2007. nvmet_fc_xmt_fcp_rsp(tgtport, fod);
  2008. break;
  2009. case NVMET_FCOP_RSP:
  2010. if (__nvmet_fc_fod_op_abort(fod, abort))
  2011. return;
  2012. nvmet_fc_free_fcp_iod(fod->queue, fod);
  2013. break;
  2014. default:
  2015. break;
  2016. }
  2017. }
  2018. static void
  2019. nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
  2020. {
  2021. struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
  2022. nvmet_fc_fod_op_done(fod);
  2023. }
  2024. /*
  2025. * actual completion handler after execution by the nvmet layer
  2026. */
  2027. static void
  2028. __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
  2029. struct nvmet_fc_fcp_iod *fod, int status)
  2030. {
  2031. struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
  2032. struct nvme_completion *cqe = &fod->rspiubuf.cqe;
  2033. unsigned long flags;
  2034. bool abort;
  2035. spin_lock_irqsave(&fod->flock, flags);
  2036. abort = fod->abort;
  2037. spin_unlock_irqrestore(&fod->flock, flags);
  2038. /* if we have a CQE, snoop the last sq_head value */
  2039. if (!status)
  2040. fod->queue->sqhd = cqe->sq_head;
  2041. if (abort) {
  2042. nvmet_fc_abort_op(tgtport, fod);
  2043. return;
  2044. }
  2045. /* if an error handling the cmd post initial parsing */
  2046. if (status) {
  2047. /* fudge up a failed CQE status for our transport error */
  2048. memset(cqe, 0, sizeof(*cqe));
  2049. cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
  2050. cqe->sq_id = cpu_to_le16(fod->queue->qid);
  2051. cqe->command_id = sqe->command_id;
  2052. cqe->status = cpu_to_le16(status);
  2053. } else {
  2054. /*
  2055. * try to push the data even if the SQE status is non-zero.
  2056. * There may be a status where data still was intended to
  2057. * be moved
  2058. */
  2059. if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
  2060. /* push the data over before sending rsp */
  2061. nvmet_fc_transfer_fcp_data(tgtport, fod,
  2062. NVMET_FCOP_READDATA);
  2063. return;
  2064. }
  2065. /* writes & no data - fall thru */
  2066. }
  2067. /* data no longer needed */
  2068. nvmet_fc_free_tgt_pgs(fod);
  2069. nvmet_fc_xmt_fcp_rsp(tgtport, fod);
  2070. }
  2071. static void
  2072. nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
  2073. {
  2074. struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
  2075. struct nvmet_fc_tgtport *tgtport = fod->tgtport;
  2076. __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
  2077. }
  2078. /*
  2079. * Actual processing routine for received FC-NVME I/O Requests from the LLD
  2080. */
  2081. static void
  2082. nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
  2083. struct nvmet_fc_fcp_iod *fod)
  2084. {
  2085. struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
  2086. u32 xfrlen = be32_to_cpu(cmdiu->data_len);
  2087. int ret;
/*
 * Fused commands are currently not supported in the linux
 * implementation.
 *
 * As such, the FC transport does not examine fused commands or hold
 * back and order their delivery to the upper layer by CSN until both
 * halves have been received.
 */
  2096. fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
  2097. if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
  2098. fod->io_dir = NVMET_FCP_WRITE;
  2099. if (!nvme_is_write(&cmdiu->sqe))
  2100. goto transport_error;
  2101. } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
  2102. fod->io_dir = NVMET_FCP_READ;
  2103. if (nvme_is_write(&cmdiu->sqe))
  2104. goto transport_error;
  2105. } else {
  2106. fod->io_dir = NVMET_FCP_NODATA;
  2107. if (xfrlen)
  2108. goto transport_error;
  2109. }
  2110. fod->req.cmd = &fod->cmdiubuf.sqe;
  2111. fod->req.cqe = &fod->rspiubuf.cqe;
  2112. if (tgtport->pe)
  2113. fod->req.port = tgtport->pe->port;
  2114. /* clear any response payload */
  2115. memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
  2116. fod->data_sg = NULL;
  2117. fod->data_sg_cnt = 0;
  2118. ret = nvmet_req_init(&fod->req,
  2119. &fod->queue->nvme_cq,
  2120. &fod->queue->nvme_sq,
  2121. &nvmet_fc_tgt_fcp_ops);
  2122. if (!ret) {
  2123. /* bad SQE content or invalid ctrl state */
  2124. /* nvmet layer has already called op done to send rsp. */
  2125. return;
  2126. }
  2127. fod->req.transfer_len = xfrlen;
  2128. /* keep a running counter of tail position */
  2129. atomic_inc(&fod->queue->sqtail);
  2130. if (fod->req.transfer_len) {
  2131. ret = nvmet_fc_alloc_tgt_pgs(fod);
  2132. if (ret) {
  2133. nvmet_req_complete(&fod->req, ret);
  2134. return;
  2135. }
  2136. }
  2137. fod->req.sg = fod->data_sg;
  2138. fod->req.sg_cnt = fod->data_sg_cnt;
  2139. fod->offset = 0;
  2140. if (fod->io_dir == NVMET_FCP_WRITE) {
  2141. /* pull the data over before invoking nvmet layer */
  2142. nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
  2143. return;
  2144. }
  2145. /*
  2146. * Reads or no data:
  2147. *
  2148. * can invoke the nvmet_layer now. If read data, cmd completion will
  2149. * push the data
  2150. */
  2151. fod->req.execute(&fod->req);
  2152. return;
  2153. transport_error:
  2154. nvmet_fc_abort_op(tgtport, fod);
  2155. }
/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of a NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, because FC is packetized and the LLDD may not have seen the hw
 * completion for a prior response (and thus may not yet have upcalled the
 * nvmet_fc layer to recycle the job structure) before a new command is
 * asynchronously received, it's possible for a command to arrive before
 * the LLDD and nvmet_fc have recycled the job structure. This gives the
 * appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated, the
 * LLDD request and CMD IU buffer information are remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer.
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * (A hypothetical LLDD-side calling sketch follows the function below.)
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq: pointer to a fcpreq request structure to be used to reference
 *          the exchange corresponding to the FCP Exchange.
 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
  2203. int
  2204. nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
  2205. struct nvmefc_tgt_fcp_req *fcpreq,
  2206. void *cmdiubuf, u32 cmdiubuf_len)
  2207. {
  2208. struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
  2209. struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
  2210. struct nvmet_fc_tgt_queue *queue;
  2211. struct nvmet_fc_fcp_iod *fod;
  2212. struct nvmet_fc_defer_fcp_req *deferfcp;
  2213. unsigned long flags;
  2214. /* validate iu, so the connection id can be used to find the queue */
  2215. if ((cmdiubuf_len != sizeof(*cmdiu)) ||
  2216. (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
  2217. (cmdiu->fc_id != NVME_CMD_FC_ID) ||
  2218. (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
  2219. return -EIO;
  2220. queue = nvmet_fc_find_target_queue(tgtport,
  2221. be64_to_cpu(cmdiu->connection_id));
  2222. if (!queue)
  2223. return -ENOTCONN;
  2224. /*
  2225. * note: reference taken by find_target_queue
  2226. * After successful fod allocation, the fod will inherit the
  2227. * ownership of that reference and will remove the reference
  2228. * when the fod is freed.
  2229. */
  2230. spin_lock_irqsave(&queue->qlock, flags);
  2231. fod = nvmet_fc_alloc_fcp_iod(queue);
  2232. if (fod) {
  2233. spin_unlock_irqrestore(&queue->qlock, flags);
  2234. fcpreq->nvmet_fc_private = fod;
  2235. fod->fcpreq = fcpreq;
  2236. memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
  2237. nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
  2238. return 0;
  2239. }
  2240. if (!tgtport->ops->defer_rcv) {
  2241. spin_unlock_irqrestore(&queue->qlock, flags);
  2242. /* release the queue lookup reference */
  2243. nvmet_fc_tgt_q_put(queue);
  2244. return -ENOENT;
  2245. }
  2246. deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
  2247. struct nvmet_fc_defer_fcp_req, req_list);
  2248. if (deferfcp) {
  2249. /* Just re-use one that was previously allocated */
  2250. list_del(&deferfcp->req_list);
  2251. } else {
  2252. spin_unlock_irqrestore(&queue->qlock, flags);
  2253. /* Now we need to dynamically allocate one */
  2254. deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
  2255. if (!deferfcp) {
  2256. /* release the queue lookup reference */
  2257. nvmet_fc_tgt_q_put(queue);
  2258. return -ENOMEM;
  2259. }
  2260. spin_lock_irqsave(&queue->qlock, flags);
  2261. }
  2262. /* For now, use rspaddr / rsplen to save payload information */
  2263. fcpreq->rspaddr = cmdiubuf;
  2264. fcpreq->rsplen = cmdiubuf_len;
  2265. deferfcp->fcp_req = fcpreq;
  2266. /* defer processing till a fod becomes available */
  2267. list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
  2268. /* NOTE: the queue lookup reference is still valid */
  2269. spin_unlock_irqrestore(&queue->qlock, flags);
  2270. return -EOVERFLOW;
  2271. }
  2272. EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
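/*
 * Illustrative sketch only: a hypothetical LLDD passing a received FCP CMD IU
 * to nvmet-fc and honouring the -EOVERFLOW deferral rules described above.
 * "example_fcp_exchange" (embedding a struct nvmefc_tgt_fcp_req) and the
 * example_* helpers are made-up LLDD constructs.
 */
#if 0
static void example_lldd_fcp_cmd_rcv(struct example_fc_port *eport,
			struct example_fcp_exchange *xchg,
			void *cmdiu, u32 cmdiu_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(eport->targetport, &xchg->tgt_fcp_req,
				   cmdiu, cmdiu_len);
	switch (ret) {
	case 0:
		/* accepted: the CMD IU buffer may be reused immediately */
		break;
	case -EOVERFLOW:
		/* accepted, but keep cmdiu untouched until ->defer_rcv() */
		example_mark_deferred(xchg);
		break;
	default:
		/* not accepted - terminate the exchange on the wire */
		example_abort_exchange(xchg);
		break;
	}
}
#endif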
/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                          upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the LLDD's fcp_req_release() callback to
 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * (A hypothetical LLDD-side calling sketch follows the function below.)
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq: pointer to the fcpreq request structure that corresponds
 *          to the exchange that received the ABTS.
 */
  2296. void
  2297. nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
  2298. struct nvmefc_tgt_fcp_req *fcpreq)
  2299. {
  2300. struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
  2301. struct nvmet_fc_tgt_queue *queue;
  2302. unsigned long flags;
  2303. if (!fod || fod->fcpreq != fcpreq)
  2304. /* job appears to have already completed, ignore abort */
  2305. return;
  2306. queue = fod->queue;
  2307. spin_lock_irqsave(&queue->qlock, flags);
  2308. if (fod->active) {
  2309. /*
  2310. * mark as abort. The abort handler, invoked upon completion
  2311. * of any work, will detect the aborted status and do the
  2312. * callback.
  2313. */
  2314. spin_lock(&fod->flock);
  2315. fod->abort = true;
  2316. fod->aborted = true;
  2317. spin_unlock(&fod->flock);
  2318. }
  2319. spin_unlock_irqrestore(&queue->qlock, flags);
  2320. }
  2321. EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
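/*
 * Illustrative sketch only: a hypothetical LLDD ABTS handler.  As described
 * above, the i/o context is returned through the LLDD's ->fcp_req_release()
 * callback once outstanding work completes; the BA_ACC may be sent after
 * this call or from that callback.  example_* names are made up.
 */
#if 0
static void example_lldd_fcp_abts_rcv(struct example_fc_port *eport,
			struct example_fcp_exchange *xchg)
{
	nvmet_fc_rcv_fcp_abort(eport->targetport, &xchg->tgt_fcp_req);

	/* defer BA_ACC until ->fcp_req_release() if work is still outstanding */
}
#endif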
  2322. struct nvmet_fc_traddr {
  2323. u64 nn;
  2324. u64 pn;
  2325. };
  2326. static int
  2327. __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
  2328. {
  2329. u64 token64;
  2330. if (match_u64(sstr, &token64))
  2331. return -EINVAL;
  2332. *val = token64;
  2333. return 0;
  2334. }
  2335. /*
  2336. * This routine validates and extracts the WWN's from the TRADDR string.
  2337. * As kernel parsers need the 0x to determine number base, universally
  2338. * build string to parse with 0x prefix before parsing name strings.
  2339. */
  2340. static int
  2341. nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
  2342. {
  2343. char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
  2344. substring_t wwn = { name, &name[sizeof(name)-1] };
  2345. int nnoffset, pnoffset;
  2346. /* validate if string is one of the 2 allowed formats */
  2347. if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
  2348. !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
  2349. !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
  2350. "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
  2351. nnoffset = NVME_FC_TRADDR_OXNNLEN;
  2352. pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
  2353. NVME_FC_TRADDR_OXNNLEN;
  2354. } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
  2355. !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
  2356. !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
  2357. "pn-", NVME_FC_TRADDR_NNLEN))) {
  2358. nnoffset = NVME_FC_TRADDR_NNLEN;
  2359. pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
  2360. } else
  2361. goto out_einval;
  2362. name[0] = '0';
  2363. name[1] = 'x';
  2364. name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
  2365. memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
  2366. if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
  2367. goto out_einval;
  2368. memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
  2369. if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
  2370. goto out_einval;
  2371. return 0;
  2372. out_einval:
  2373. pr_warn("%s: bad traddr string\n", __func__);
  2374. return -EINVAL;
  2375. }
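/*
 * Illustrative examples of the two traddr forms accepted above (fabricated
 * WWN values, 16 hex digits each):
 *   "nn-0x20000090fa942779:pn-0x10000090fa942779"   (0x-prefixed, max length)
 *   "nn-20000090fa942779:pn-10000090fa942779"       (bare hex, min length)
 */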
  2376. static int
  2377. nvmet_fc_add_port(struct nvmet_port *port)
  2378. {
  2379. struct nvmet_fc_tgtport *tgtport;
  2380. struct nvmet_fc_port_entry *pe;
  2381. struct nvmet_fc_traddr traddr = { 0L, 0L };
  2382. unsigned long flags;
  2383. int ret;
  2384. /* validate the address info */
  2385. if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
  2386. (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
  2387. return -EINVAL;
  2388. /* map the traddr address info to a target port */
  2389. ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
  2390. sizeof(port->disc_addr.traddr));
  2391. if (ret)
  2392. return ret;
  2393. pe = kzalloc(sizeof(*pe), GFP_KERNEL);
  2394. if (!pe)
  2395. return -ENOMEM;
  2396. ret = -ENXIO;
  2397. spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
  2398. list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
  2399. if ((tgtport->fc_target_port.node_name == traddr.nn) &&
  2400. (tgtport->fc_target_port.port_name == traddr.pn)) {
  2401. /* a FC port can only be 1 nvmet port id */
  2402. if (!tgtport->pe) {
  2403. nvmet_fc_portentry_bind(tgtport, pe, port);
  2404. ret = 0;
  2405. } else
  2406. ret = -EALREADY;
  2407. break;
  2408. }
  2409. }
  2410. spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
  2411. if (ret)
  2412. kfree(pe);
  2413. return ret;
  2414. }
  2415. static void
  2416. nvmet_fc_remove_port(struct nvmet_port *port)
  2417. {
  2418. struct nvmet_fc_port_entry *pe = port->priv;
  2419. nvmet_fc_portentry_unbind(pe);
  2420. kfree(pe);
  2421. }
  2422. static void
  2423. nvmet_fc_discovery_chg(struct nvmet_port *port)
  2424. {
  2425. struct nvmet_fc_port_entry *pe = port->priv;
  2426. struct nvmet_fc_tgtport *tgtport = pe->tgtport;
  2427. if (tgtport && tgtport->ops->discovery_event)
  2428. tgtport->ops->discovery_event(&tgtport->fc_target_port);
  2429. }
  2430. static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
  2431. .owner = THIS_MODULE,
  2432. .type = NVMF_TRTYPE_FC,
  2433. .msdbd = 1,
  2434. .add_port = nvmet_fc_add_port,
  2435. .remove_port = nvmet_fc_remove_port,
  2436. .queue_response = nvmet_fc_fcp_nvme_cmd_done,
  2437. .delete_ctrl = nvmet_fc_delete_ctrl,
  2438. .discovery_chg = nvmet_fc_discovery_chg,
  2439. };
  2440. static int __init nvmet_fc_init_module(void)
  2441. {
  2442. return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
  2443. }
  2444. static void __exit nvmet_fc_exit_module(void)
  2445. {
/* sanity check - all targetports should be removed */
  2447. if (!list_empty(&nvmet_fc_target_list))
  2448. pr_warn("%s: targetport list not empty\n", __func__);
  2449. nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
  2450. ida_destroy(&nvmet_fc_tgtport_cnt);
  2451. }
  2452. module_init(nvmet_fc_init_module);
  2453. module_exit(nvmet_fc_exit_module);
  2454. MODULE_LICENSE("GPL v2");