smbdirect.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017, Microsoft Corporation.
 *
 * Author(s): Long Li <longli@microsoft.com>
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
#include "smb2proto.h"

static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info);
static struct smbd_response *get_receive_buffer(
                struct smbd_connection *info);
static void put_receive_buffer(
                struct smbd_connection *info,
                struct smbd_response *response);
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
static void destroy_receive_buffers(struct smbd_connection *info);

static void put_empty_packet(
                struct smbd_connection *info, struct smbd_response *response);
static void enqueue_reassembly(
                struct smbd_connection *info,
                struct smbd_response *response, int data_length);
static struct smbd_response *_get_first_reassembly(
                struct smbd_connection *info);

static int smbd_post_recv(
                struct smbd_connection *info,
                struct smbd_response *response);

static int smbd_post_send_empty(struct smbd_connection *info);
static int smbd_post_send_data(
                struct smbd_connection *info,
                struct kvec *iov, int n_vec, int remaining_data_length);
static int smbd_post_send_page(struct smbd_connection *info,
                struct page *page, unsigned long offset,
                size_t size, int remaining_data_length);

static void destroy_mr_list(struct smbd_connection *info);
static int allocate_mr_list(struct smbd_connection *info);

/* SMBD version number */
#define SMBD_V1 0x0100

/* Port numbers for SMBD transport */
#define SMB_PORT 445
#define SMBD_PORT 5445

/* Address lookup and resolve timeout in ms */
#define RDMA_RESOLVE_TIMEOUT 5000

/* SMBD negotiation timeout in seconds */
#define SMBD_NEGOTIATE_TIMEOUT 120

/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
#define SMBD_MIN_RECEIVE_SIZE 128
#define SMBD_MIN_FRAGMENTED_SIZE 131072

/*
 * Default maximum number of RDMA read/write outstanding on this connection
 * This value may be decreased during QP creation if the hardware limit is lower
 */
#define SMBD_CM_RESPONDER_RESOURCES 32

/* Maximum number of retries on data transfer operations */
#define SMBD_CM_RETRY 6
/* No need to retry on Receiver Not Ready since SMBD manages credits */
#define SMBD_CM_RNR_RETRY 0

/*
 * User configurable initial values per SMBD transport connection
 * as defined in [MS-SMBD] 3.1.1.1
 * These may change after SMBD negotiation
 */
/* The local peer's maximum number of credits to grant to the peer */
int smbd_receive_credit_max = 255;

/* The number of credits to request from the remote peer */
int smbd_send_credit_target = 255;

/* The maximum single message size that can be sent to the remote peer */
int smbd_max_send_size = 1364;

/* The maximum fragmented upper-layer payload receive size supported */
int smbd_max_fragmented_recv_size = 1024 * 1024;

/* The maximum single-message size which can be received */
int smbd_max_receive_size = 8192;

/* The timeout, in seconds, before a keepalive message is sent on an idle connection */
int smbd_keep_alive_interval = 120;

/*
 * User configurable initial values for RDMA transport
 * The actual values used may be lower and are limited to hardware capabilities
 */
/* Default maximum number of SGEs in a RDMA write/read */
int smbd_max_frmr_depth = 2048;

/* If the payload is smaller than this many bytes, use RDMA send/recv instead of read/write */
int rdma_readwrite_threshold = 4096;

/* Transport logging functions
 * Log messages are grouped into classes, which can be OR'ed together to
 * select what is logged via the module parameter smbd_logging_class
 * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
 * log_rdma_event()
 */
#define LOG_OUTGOING 0x1
#define LOG_INCOMING 0x2
#define LOG_READ 0x4
#define LOG_WRITE 0x8
#define LOG_RDMA_SEND 0x10
#define LOG_RDMA_RECV 0x20
#define LOG_KEEP_ALIVE 0x40
#define LOG_RDMA_EVENT 0x80
#define LOG_RDMA_MR 0x100
static unsigned int smbd_logging_class;
module_param(smbd_logging_class, uint, 0644);
MODULE_PARM_DESC(smbd_logging_class,
        "Logging class for SMBD transport 0x0 to 0x100");

#define ERR 0x0
#define INFO 0x1
static unsigned int smbd_logging_level = ERR;
module_param(smbd_logging_level, uint, 0644);
MODULE_PARM_DESC(smbd_logging_level,
        "Logging level for SMBD transport, 0 (default): error, 1: info");

#define log_rdma(level, class, fmt, args...) \
do { \
        if (level <= smbd_logging_level || class & smbd_logging_class) \
                cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
} while (0)

#define log_outgoing(level, fmt, args...) \
                log_rdma(level, LOG_OUTGOING, fmt, ##args)
#define log_incoming(level, fmt, args...) \
                log_rdma(level, LOG_INCOMING, fmt, ##args)
#define log_read(level, fmt, args...) log_rdma(level, LOG_READ, fmt, ##args)
#define log_write(level, fmt, args...) log_rdma(level, LOG_WRITE, fmt, ##args)
#define log_rdma_send(level, fmt, args...) \
                log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
#define log_rdma_recv(level, fmt, args...) \
                log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
#define log_keep_alive(level, fmt, args...) \
                log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
#define log_rdma_event(level, fmt, args...) \
                log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
#define log_rdma_mr(level, fmt, args...) \
                log_rdma(level, LOG_RDMA_MR, fmt, ##args)
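/*
 * Because these are ordinary 0644 module parameters of cifs.ko, the logging
 * class and level can also be set at module load time (for example
 * "modprobe cifs smbd_logging_class=0x30 smbd_logging_level=1") or changed
 * at runtime through /sys/module/cifs/parameters/ -- an illustrative note
 * based on standard module-parameter behavior, not stated in this file.
 */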
static void smbd_disconnect_rdma_work(struct work_struct *work)
{
        struct smbd_connection *info =
                container_of(work, struct smbd_connection, disconnect_work);

        if (info->transport_status == SMBD_CONNECTED) {
                info->transport_status = SMBD_DISCONNECTING;
                rdma_disconnect(info->id);
        }
}

static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
{
        queue_work(info->workqueue, &info->disconnect_work);
}

/* Upcall from RDMA CM */
static int smbd_conn_upcall(
                struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct smbd_connection *info = id->context;

        log_rdma_event(INFO, "event=%d status=%d\n",
                       event->event, event->status);

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                info->ri_rc = 0;
                complete(&info->ri_done);
                break;

        case RDMA_CM_EVENT_ADDR_ERROR:
                info->ri_rc = -EHOSTUNREACH;
                complete(&info->ri_done);
                break;

        case RDMA_CM_EVENT_ROUTE_ERROR:
                info->ri_rc = -ENETUNREACH;
                complete(&info->ri_done);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                log_rdma_event(INFO, "connected event=%d\n", event->event);
                info->transport_status = SMBD_CONNECTED;
                wake_up_interruptible(&info->conn_wait);
                break;

        case RDMA_CM_EVENT_CONNECT_ERROR:
        case RDMA_CM_EVENT_UNREACHABLE:
        case RDMA_CM_EVENT_REJECTED:
                log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
                info->transport_status = SMBD_DISCONNECTED;
                wake_up_interruptible(&info->conn_wait);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
        case RDMA_CM_EVENT_DISCONNECTED:
                /* This happens when we fail the negotiation */
                if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
                        info->transport_status = SMBD_DISCONNECTED;
                        wake_up(&info->conn_wait);
                        break;
                }

                info->transport_status = SMBD_DISCONNECTED;
                wake_up_interruptible(&info->disconn_wait);
                wake_up_interruptible(&info->wait_reassembly_queue);
                wake_up_interruptible_all(&info->wait_send_queue);
                break;

        default:
                break;
        }

        return 0;
}
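/*
 * The address/route events above pair with smbd_create_id(), which waits on
 * ri_done while resolving the destination. ESTABLISHED and the failure or
 * disconnect events wake the conn_wait and disconn_wait waiters: disconn_wait
 * is waited on in smbd_destroy() below, while conn_wait is used by the
 * connection-setup path (beyond this excerpt).
 */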
/* Upcall from RDMA QP */
static void
smbd_qp_async_error_upcall(struct ib_event *event, void *context)
{
        struct smbd_connection *info = context;

        log_rdma_event(ERR, "%s on device %s info %p\n",
                       ib_event_msg(event->event), event->device->name, info);

        switch (event->event) {
        case IB_EVENT_CQ_ERR:
        case IB_EVENT_QP_FATAL:
                smbd_disconnect_rdma_connection(info);
        default:
                break;
        }
}

static inline void *smbd_request_payload(struct smbd_request *request)
{
        return (void *)request->packet;
}

static inline void *smbd_response_payload(struct smbd_response *response)
{
        return (void *)response->packet;
}

/* Called when a RDMA send is done */
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        int i;
        struct smbd_request *request =
                container_of(wc->wr_cqe, struct smbd_request, cqe);

        log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
                      request, wc->status);

        if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
                log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
                              wc->status, wc->opcode);
                smbd_disconnect_rdma_connection(request->info);
        }

        for (i = 0; i < request->num_sge; i++)
                ib_dma_unmap_single(request->info->id->device,
                                    request->sge[i].addr,
                                    request->sge[i].length,
                                    DMA_TO_DEVICE);

        if (atomic_dec_and_test(&request->info->send_pending))
                wake_up(&request->info->wait_send_pending);

        wake_up(&request->info->wait_post_send);

        mempool_free(request, request->info->request_mempool);
}
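/*
 * send_done() runs in completion-queue (softirq) context: it unmaps the
 * request's SGEs, drops send_pending (the counter smbd_destroy() waits on),
 * wakes any sender throttled on wait_post_send, and returns the request to
 * its mempool. A failed completion additionally schedules a disconnect.
 */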
static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
{
        log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
                       resp->min_version, resp->max_version,
                       resp->negotiated_version, resp->credits_requested,
                       resp->credits_granted, resp->status,
                       resp->max_readwrite_size, resp->preferred_send_size,
                       resp->max_receive_size, resp->max_fragmented_size);
}

/*
 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
 * response, packet_length: the negotiation response message
 * return value: true if negotiation is a success, false if failed
 */
static bool process_negotiation_response(
                struct smbd_response *response, int packet_length)
{
        struct smbd_connection *info = response->info;
        struct smbd_negotiate_resp *packet = smbd_response_payload(response);

        if (packet_length < sizeof(struct smbd_negotiate_resp)) {
                log_rdma_event(ERR,
                               "error: packet_length=%d\n", packet_length);
                return false;
        }

        if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
                log_rdma_event(ERR, "error: negotiated_version=%x\n",
                               le16_to_cpu(packet->negotiated_version));
                return false;
        }
        info->protocol = le16_to_cpu(packet->negotiated_version);

        if (packet->credits_requested == 0) {
                log_rdma_event(ERR, "error: credits_requested==0\n");
                return false;
        }
        info->receive_credit_target = le16_to_cpu(packet->credits_requested);

        if (packet->credits_granted == 0) {
                log_rdma_event(ERR, "error: credits_granted==0\n");
                return false;
        }
        atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

        atomic_set(&info->receive_credits, 0);

        if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
                log_rdma_event(ERR, "error: preferred_send_size=%d\n",
                               le32_to_cpu(packet->preferred_send_size));
                return false;
        }
        info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

        if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
                log_rdma_event(ERR, "error: max_receive_size=%d\n",
                               le32_to_cpu(packet->max_receive_size));
                return false;
        }
        info->max_send_size = min_t(int, info->max_send_size,
                                    le32_to_cpu(packet->max_receive_size));

        if (le32_to_cpu(packet->max_fragmented_size) <
            SMBD_MIN_FRAGMENTED_SIZE) {
                log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
                               le32_to_cpu(packet->max_fragmented_size));
                return false;
        }
        info->max_fragmented_send_size =
                le32_to_cpu(packet->max_fragmented_size);
        info->rdma_readwrite_threshold =
                rdma_readwrite_threshold > info->max_fragmented_send_size ?
                info->max_fragmented_send_size :
                rdma_readwrite_threshold;

        info->max_readwrite_size = min_t(u32,
                                         le32_to_cpu(packet->max_readwrite_size),
                                         info->max_frmr_depth * PAGE_SIZE);
        info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;

        return true;
}
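/*
 * Illustrative example (hypothetical peer values, not from this source):
 * assuming the local defaults above (max_send_size 1364, max_receive_size
 * 8192, rdma_readwrite_threshold 4096), a peer answering with
 * preferred_send_size 1364, max_receive_size 8192 and max_fragmented_size
 * 1MB drops the local max_receive_size to 1364, keeps max_send_size at
 * min(1364, 8192) = 1364, leaves the read/write threshold at 4096, and caps
 * max_readwrite_size at min(peer max_readwrite_size, max_frmr_depth * PAGE_SIZE).
 */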
static void smbd_post_send_credits(struct work_struct *work)
{
        int ret = 0;
        int use_receive_queue = 1;
        int rc;
        struct smbd_response *response;
        struct smbd_connection *info =
                container_of(work, struct smbd_connection,
                             post_send_credits_work);

        if (info->transport_status != SMBD_CONNECTED) {
                wake_up(&info->wait_receive_queues);
                return;
        }

        if (info->receive_credit_target >
            atomic_read(&info->receive_credits)) {
                while (true) {
                        if (use_receive_queue)
                                response = get_receive_buffer(info);
                        else
                                response = get_empty_queue_buffer(info);
                        if (!response) {
                                /* now switch to empty packet queue */
                                if (use_receive_queue) {
                                        use_receive_queue = 0;
                                        continue;
                                } else
                                        break;
                        }

                        response->type = SMBD_TRANSFER_DATA;
                        response->first_segment = false;
                        rc = smbd_post_recv(info, response);
                        if (rc) {
                                log_rdma_recv(ERR,
                                              "post_recv failed rc=%d\n", rc);
                                put_receive_buffer(info, response);
                                break;
                        }

                        ret++;
                }
        }

        spin_lock(&info->lock_new_credits_offered);
        info->new_credits_offered += ret;
        spin_unlock(&info->lock_new_credits_offered);

        /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
        info->send_immediate = true;
        if (atomic_read(&info->receive_credits) <
            info->receive_credit_target - 1) {
                if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
                    info->send_immediate) {
                        log_keep_alive(INFO, "send an empty message\n");
                        smbd_post_send_empty(info);
                }
        }
}
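/*
 * The work item above refills receives first from the regular receive-buffer
 * pool and, once that runs dry, from the empty-packet pool. The count of
 * successfully posted receives is banked in new_credits_offered and only
 * becomes visible to the peer when the next outgoing packet (possibly the
 * empty message sent here) carries it as credits_granted.
 */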
/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct smbd_data_transfer *data_transfer;
        struct smbd_response *response =
                container_of(wc->wr_cqe, struct smbd_response, cqe);
        struct smbd_connection *info = response->info;
        int data_length = 0;

        log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
                      response, response->type, wc->status, wc->opcode,
                      wc->byte_len, wc->pkey_index);

        if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
                log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
                              wc->status, wc->opcode);
                smbd_disconnect_rdma_connection(info);
                goto error;
        }

        ib_dma_sync_single_for_cpu(
                wc->qp->device,
                response->sge.addr,
                response->sge.length,
                DMA_FROM_DEVICE);

        switch (response->type) {
        /* SMBD negotiation response */
        case SMBD_NEGOTIATE_RESP:
                dump_smbd_negotiate_resp(smbd_response_payload(response));
                info->full_packet_received = true;
                info->negotiate_done =
                        process_negotiation_response(response, wc->byte_len);
                complete(&info->negotiate_completion);
                break;

        /* SMBD data transfer packet */
        case SMBD_TRANSFER_DATA:
                data_transfer = smbd_response_payload(response);
                data_length = le32_to_cpu(data_transfer->data_length);

                /*
                 * If this is a packet with data payload place the data in
                 * reassembly queue and wake up the reading thread
                 */
                if (data_length) {
                        if (info->full_packet_received)
                                response->first_segment = true;

                        if (le32_to_cpu(data_transfer->remaining_data_length))
                                info->full_packet_received = false;
                        else
                                info->full_packet_received = true;

                        enqueue_reassembly(
                                info,
                                response,
                                data_length);
                } else
                        put_empty_packet(info, response);

                if (data_length)
                        wake_up_interruptible(&info->wait_reassembly_queue);

                atomic_dec(&info->receive_credits);
                info->receive_credit_target =
                        le16_to_cpu(data_transfer->credits_requested);
                if (le16_to_cpu(data_transfer->credits_granted)) {
                        atomic_add(le16_to_cpu(data_transfer->credits_granted),
                                   &info->send_credits);
                        /*
                         * We have new send credits granted from remote peer
                         * If any sender is waiting for credits, unblock it
                         */
                        wake_up_interruptible(&info->wait_send_queue);
                }

                log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
                             le16_to_cpu(data_transfer->flags),
                             le32_to_cpu(data_transfer->data_offset),
                             le32_to_cpu(data_transfer->data_length),
                             le32_to_cpu(data_transfer->remaining_data_length));

                /* Send a KEEP_ALIVE response right away if requested */
                info->keep_alive_requested = KEEP_ALIVE_NONE;
                if (le16_to_cpu(data_transfer->flags) &
                    SMB_DIRECT_RESPONSE_REQUESTED) {
                        info->keep_alive_requested = KEEP_ALIVE_PENDING;
                }

                return;

        default:
                log_rdma_recv(ERR,
                              "unexpected response type=%d\n", response->type);
        }

error:
        put_receive_buffer(info, response);
}
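/*
 * In the SMBD_TRANSFER_DATA path above, a packet that carries payload is
 * handed to the reassembly queue and stays out of the receive pool until the
 * upper layer consumes it, while a zero-length packet is purely a credit
 * grant or keep-alive and is recycled immediately through put_empty_packet().
 * Either way one receive credit is consumed, and any credit grant from the
 * peer unblocks senders waiting on wait_send_queue.
 */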
static struct rdma_cm_id *smbd_create_id(
                struct smbd_connection *info,
                struct sockaddr *dstaddr, int port)
{
        struct rdma_cm_id *id;
        int rc;
        __be16 *sport;

        id = rdma_create_id(&init_net, smbd_conn_upcall, info,
                            RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
                return id;
        }

        if (dstaddr->sa_family == AF_INET6)
                sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
        else
                sport = &((struct sockaddr_in *)dstaddr)->sin_port;

        *sport = htons(port);

        init_completion(&info->ri_done);
        info->ri_rc = -ETIMEDOUT;

        rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
                goto out;
        }
        wait_for_completion_interruptible_timeout(
                &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
        rc = info->ri_rc;
        if (rc) {
                log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
                goto out;
        }

        info->ri_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
                goto out;
        }
        wait_for_completion_interruptible_timeout(
                &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
        rc = info->ri_rc;
        if (rc) {
                log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
                goto out;
        }

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}

/*
 * Test if FRWR (Fast Registration Work Requests) is supported on the device
 * This implementation requires FRWR on RDMA read/write
 * return value: true if it is supported
 */
static bool frwr_is_supported(struct ib_device_attr *attrs)
{
        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                return false;
        if (attrs->max_fast_reg_page_list_len == 0)
                return false;
        return true;
}
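/*
 * Both checks matter: IB_DEVICE_MEM_MGT_EXTENSIONS advertises memory
 * registration work requests, and a non-zero max_fast_reg_page_list_len
 * bounds how many pages a single fast-registration MR can cover.
 * smbd_ia_open() below refuses the device with -EPROTONOSUPPORT if either
 * is missing, since the RDMA read/write path depends on FRWR-registered MRs.
 */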
static int smbd_ia_open(
                struct smbd_connection *info,
                struct sockaddr *dstaddr, int port)
{
        int rc;

        info->id = smbd_create_id(info, dstaddr, port);
        if (IS_ERR(info->id)) {
                rc = PTR_ERR(info->id);
                goto out1;
        }

        if (!frwr_is_supported(&info->id->device->attrs)) {
                log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
                log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
                               info->id->device->attrs.device_cap_flags,
                               info->id->device->attrs.max_fast_reg_page_list_len);
                rc = -EPROTONOSUPPORT;
                goto out2;
        }
        info->max_frmr_depth = min_t(int,
                                     smbd_max_frmr_depth,
                                     info->id->device->attrs.max_fast_reg_page_list_len);
        info->mr_type = IB_MR_TYPE_MEM_REG;
        if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                info->mr_type = IB_MR_TYPE_SG_GAPS;

        info->pd = ib_alloc_pd(info->id->device, 0);
        if (IS_ERR(info->pd)) {
                rc = PTR_ERR(info->pd);
                log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
                goto out2;
        }

        return 0;

out2:
        rdma_destroy_id(info->id);
        info->id = NULL;

out1:
        return rc;
}

/*
 * Send a negotiation request message to the peer
 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
 * After negotiation, the transport is connected and ready for
 * carrying upper layer SMB payload
 */
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
        struct ib_send_wr send_wr;
        int rc = -ENOMEM;
        struct smbd_request *request;
        struct smbd_negotiate_req *packet;

        request = mempool_alloc(info->request_mempool, GFP_KERNEL);
        if (!request)
                return rc;

        request->info = info;

        packet = smbd_request_payload(request);
        packet->min_version = cpu_to_le16(SMBD_V1);
        packet->max_version = cpu_to_le16(SMBD_V1);
        packet->reserved = 0;
        packet->credits_requested = cpu_to_le16(info->send_credit_target);
        packet->preferred_send_size = cpu_to_le32(info->max_send_size);
        packet->max_receive_size = cpu_to_le32(info->max_receive_size);
        packet->max_fragmented_size =
                cpu_to_le32(info->max_fragmented_recv_size);

        request->num_sge = 1;
        request->sge[0].addr = ib_dma_map_single(
                                info->id->device, (void *)packet,
                                sizeof(*packet), DMA_TO_DEVICE);
        if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
                rc = -EIO;
                goto dma_mapping_failed;
        }

        request->sge[0].length = sizeof(*packet);
        request->sge[0].lkey = info->pd->local_dma_lkey;

        ib_dma_sync_single_for_device(
                info->id->device, request->sge[0].addr,
                request->sge[0].length, DMA_TO_DEVICE);

        request->cqe.done = send_done;

        send_wr.next = NULL;
        send_wr.wr_cqe = &request->cqe;
        send_wr.sg_list = request->sge;
        send_wr.num_sge = request->num_sge;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
                      request->sge[0].addr,
                      request->sge[0].length, request->sge[0].lkey);

        atomic_inc(&info->send_pending);
        rc = ib_post_send(info->id->qp, &send_wr, NULL);
        if (!rc)
                return 0;

        /* if we reach here, post send failed */
        log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
        atomic_dec(&info->send_pending);
        ib_dma_unmap_single(info->id->device, request->sge[0].addr,
                            request->sge[0].length, DMA_TO_DEVICE);

        smbd_disconnect_rdma_connection(info);

dma_mapping_failed:
        mempool_free(request, info->request_mempool);
        return rc;
}
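/*
 * The negotiate request is a single-SGE send. send_pending is incremented
 * before ib_post_send() and rolled back on failure so the wait_send_pending
 * accounting used at teardown stays balanced. The peer's reply arrives
 * through the receive buffer posted beforehand in smbd_negotiate() and is
 * handled as SMBD_NEGOTIATE_RESP in recv_done().
 */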
/*
 * Extend the credits to remote peer
 * This implements [MS-SMBD] 3.1.5.9
 * The idea is that we should extend credits to remote peer as quickly as
 * it's allowed, to maintain data flow. We allocate as many receive
 * buffers as possible, and extend the receive credits to remote peer
 * return value: the new credits being granted.
 */
static int manage_credits_prior_sending(struct smbd_connection *info)
{
        int new_credits;

        spin_lock(&info->lock_new_credits_offered);
        new_credits = info->new_credits_offered;
        info->new_credits_offered = 0;
        spin_unlock(&info->lock_new_credits_offered);

        return new_credits;
}

/*
 * Check if we need to send a KEEP_ALIVE message
 * The idle connection timer triggers a KEEP_ALIVE message when it expires
 * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flags to have the peer
 * send back a response.
 * return value:
 * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
 * 0: otherwise
 */
static int manage_keep_alive_before_sending(struct smbd_connection *info)
{
        if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
                info->keep_alive_requested = KEEP_ALIVE_SENT;
                return 1;
        }
        return 0;
}
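/*
 * Keep-alive state, as implemented here: recv_done() resets the state to
 * KEEP_ALIVE_NONE on every data packet and moves it to KEEP_ALIVE_PENDING
 * when the peer sets SMB_DIRECT_RESPONSE_REQUESTED; the next outgoing packet
 * then marks it KEEP_ALIVE_SENT via the helper above. idle_connection_timer()
 * treats any state other than KEEP_ALIVE_NONE at expiry as a stalled
 * exchange and disconnects.
 */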
/* Post the send request */
static int smbd_post_send(struct smbd_connection *info,
                          struct smbd_request *request)
{
        struct ib_send_wr send_wr;
        int rc, i;

        for (i = 0; i < request->num_sge; i++) {
                log_rdma_send(INFO,
                              "rdma_request sge[%d] addr=%llu length=%u\n",
                              i, request->sge[i].addr, request->sge[i].length);
                ib_dma_sync_single_for_device(
                        info->id->device,
                        request->sge[i].addr,
                        request->sge[i].length,
                        DMA_TO_DEVICE);
        }

        request->cqe.done = send_done;

        send_wr.next = NULL;
        send_wr.wr_cqe = &request->cqe;
        send_wr.sg_list = request->sge;
        send_wr.num_sge = request->num_sge;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        rc = ib_post_send(info->id->qp, &send_wr, NULL);
        if (rc) {
                log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
                smbd_disconnect_rdma_connection(info);
                rc = -EAGAIN;
        } else
                /* Reset timer for idle connection after packet is sent */
                mod_delayed_work(info->workqueue, &info->idle_timer_work,
                                 info->keep_alive_interval*HZ);

        return rc;
}

static int smbd_post_send_sgl(struct smbd_connection *info,
        struct scatterlist *sgl, int data_length, int remaining_data_length)
{
        int num_sgs;
        int i, rc;
        int header_length;
        struct smbd_request *request;
        struct smbd_data_transfer *packet;
        int new_credits;
        struct scatterlist *sg;

wait_credit:
        /* Wait for send credits. A SMBD packet needs one credit */
        rc = wait_event_interruptible(info->wait_send_queue,
                                      atomic_read(&info->send_credits) > 0 ||
                                      info->transport_status != SMBD_CONNECTED);
        if (rc)
                goto err_wait_credit;

        if (info->transport_status != SMBD_CONNECTED) {
                log_outgoing(ERR, "disconnected not sending on wait_credit\n");
                rc = -EAGAIN;
                goto err_wait_credit;
        }
        if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
                atomic_inc(&info->send_credits);
                goto wait_credit;
        }

wait_send_queue:
        wait_event(info->wait_post_send,
                   atomic_read(&info->send_pending) < info->send_credit_target ||
                   info->transport_status != SMBD_CONNECTED);

        if (info->transport_status != SMBD_CONNECTED) {
                log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
                rc = -EAGAIN;
                goto err_wait_send_queue;
        }

        if (unlikely(atomic_inc_return(&info->send_pending) >
                     info->send_credit_target)) {
                atomic_dec(&info->send_pending);
                goto wait_send_queue;
        }

        request = mempool_alloc(info->request_mempool, GFP_KERNEL);
        if (!request) {
                rc = -ENOMEM;
                goto err_alloc;
        }

        request->info = info;

        /* Fill in the packet header */
        packet = smbd_request_payload(request);
        packet->credits_requested = cpu_to_le16(info->send_credit_target);

        new_credits = manage_credits_prior_sending(info);
        atomic_add(new_credits, &info->receive_credits);
        packet->credits_granted = cpu_to_le16(new_credits);

        info->send_immediate = false;

        packet->flags = 0;
        if (manage_keep_alive_before_sending(info))
                packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);

        packet->reserved = 0;
        if (!data_length)
                packet->data_offset = 0;
        else
                packet->data_offset = cpu_to_le32(24);
        packet->data_length = cpu_to_le32(data_length);
        packet->remaining_data_length = cpu_to_le32(remaining_data_length);
        packet->padding = 0;

        log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
                     le16_to_cpu(packet->credits_requested),
                     le16_to_cpu(packet->credits_granted),
                     le32_to_cpu(packet->data_offset),
                     le32_to_cpu(packet->data_length),
                     le32_to_cpu(packet->remaining_data_length));

        /* Map the packet to DMA */
        header_length = sizeof(struct smbd_data_transfer);
        /* If this is a packet without payload, don't send padding */
        if (!data_length)
                header_length = offsetof(struct smbd_data_transfer, padding);

        request->num_sge = 1;
        request->sge[0].addr = ib_dma_map_single(info->id->device,
                                                 (void *)packet,
                                                 header_length,
                                                 DMA_TO_DEVICE);
        if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
                rc = -EIO;
                request->sge[0].addr = 0;
                goto err_dma;
        }

        request->sge[0].length = header_length;
        request->sge[0].lkey = info->pd->local_dma_lkey;

        /* Fill in the packet data payload */
        num_sgs = sgl ? sg_nents(sgl) : 0;
        for_each_sg(sgl, sg, num_sgs, i) {
                request->sge[i+1].addr =
                        ib_dma_map_page(info->id->device, sg_page(sg),
                                        sg->offset, sg->length, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(
                            info->id->device, request->sge[i+1].addr)) {
                        rc = -EIO;
                        request->sge[i+1].addr = 0;
                        goto err_dma;
                }
                request->sge[i+1].length = sg->length;
                request->sge[i+1].lkey = info->pd->local_dma_lkey;
                request->num_sge++;
        }

        rc = smbd_post_send(info, request);
        if (!rc)
                return 0;

err_dma:
        for (i = 0; i < request->num_sge; i++)
                if (request->sge[i].addr)
                        ib_dma_unmap_single(info->id->device,
                                            request->sge[i].addr,
                                            request->sge[i].length,
                                            DMA_TO_DEVICE);
        mempool_free(request, info->request_mempool);

        /* roll back receive credits and credits to be offered */
        spin_lock(&info->lock_new_credits_offered);
        info->new_credits_offered += new_credits;
        spin_unlock(&info->lock_new_credits_offered);
        atomic_sub(new_credits, &info->receive_credits);

err_alloc:
        if (atomic_dec_and_test(&info->send_pending))
                wake_up(&info->wait_send_pending);

err_wait_send_queue:
        /* roll back send credits and pending */
        atomic_inc(&info->send_credits);

err_wait_credit:
        return rc;
}
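/*
 * smbd_post_send_sgl() is the common path for data and empty sends: it takes
 * one send credit, throttles send_pending against send_credit_target, builds
 * a smbd_data_transfer header (piggybacking any new receive credits from
 * manage_credits_prior_sending()), maps the header plus one SGE per
 * scatterlist entry, and posts the work request. The error labels unwind in
 * reverse order: unmap and free the request, return the offered credits,
 * drop send_pending, and finally give the send credit back.
 */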
/*
 * Send a page
 * page: the page to send
 * offset: offset in the page to send
 * size: length in the page to send
 * remaining_data_length: remaining data to send in this payload
 */
static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
                unsigned long offset, size_t size, int remaining_data_length)
{
        struct scatterlist sgl;

        sg_init_table(&sgl, 1);
        sg_set_page(&sgl, page, size, offset);

        return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
}

/*
 * Send an empty message
 * An empty message is used to extend credits to the peer and to keep the
 * connection alive while there is no upper-layer payload to send
 */
static int smbd_post_send_empty(struct smbd_connection *info)
{
        info->count_send_empty++;
        return smbd_post_send_sgl(info, NULL, 0, 0);
}

/*
 * Send a data buffer
 * iov: the iov array describing the data buffers
 * n_vec: number of entries in the iov array
 * remaining_data_length: remaining data to send following this packet
 * in segmented SMBD packet
 */
static int smbd_post_send_data(
        struct smbd_connection *info, struct kvec *iov, int n_vec,
        int remaining_data_length)
{
        int i;
        u32 data_length = 0;
        struct scatterlist sgl[SMBDIRECT_MAX_SGE];

        if (n_vec > SMBDIRECT_MAX_SGE) {
                cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
                return -EINVAL;
        }

        sg_init_table(sgl, n_vec);
        for (i = 0; i < n_vec; i++) {
                data_length += iov[i].iov_len;
                sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
        }

        return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
}
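/*
 * All three helpers above funnel into smbd_post_send_sgl(): a page becomes a
 * one-entry scatterlist, an empty message passes a NULL scatterlist with zero
 * length, and a kvec array becomes one scatterlist entry per vector, each of
 * which occupies one SGE after the header SGE in the request.
 */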
/*
 * Post a receive request to the transport
 * The remote peer can only send data when a receive request is posted
 * The interaction is controlled by the send/receive credit system
 */
static int smbd_post_recv(
                struct smbd_connection *info, struct smbd_response *response)
{
        struct ib_recv_wr recv_wr;
        int rc = -EIO;

        response->sge.addr = ib_dma_map_single(
                                info->id->device, response->packet,
                                info->max_receive_size, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(info->id->device, response->sge.addr))
                return rc;

        response->sge.length = info->max_receive_size;
        response->sge.lkey = info->pd->local_dma_lkey;

        response->cqe.done = recv_done;

        recv_wr.wr_cqe = &response->cqe;
        recv_wr.next = NULL;
        recv_wr.sg_list = &response->sge;
        recv_wr.num_sge = 1;

        rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
        if (rc) {
                ib_dma_unmap_single(info->id->device, response->sge.addr,
                                    response->sge.length, DMA_FROM_DEVICE);
                smbd_disconnect_rdma_connection(info);
                log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
        }

        return rc;
}
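/*
 * Each posted receive maps a buffer of max_receive_size for the device, so
 * every receive credit advertised to the peer is backed by a buffer that can
 * hold a full-sized SMBD packet; recv_done() later syncs the buffer back for
 * the CPU before parsing it.
 */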
/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
static int smbd_negotiate(struct smbd_connection *info)
{
        int rc;
        struct smbd_response *response = get_receive_buffer(info);

        response->type = SMBD_NEGOTIATE_RESP;
        rc = smbd_post_recv(info, response);
        log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x iov.lkey=%x\n",
                       rc, response->sge.addr,
                       response->sge.length, response->sge.lkey);
        if (rc)
                return rc;

        init_completion(&info->negotiate_completion);
        info->negotiate_done = false;
        rc = smbd_post_send_negotiate_req(info);
        if (rc)
                return rc;

        rc = wait_for_completion_interruptible_timeout(
                &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
        log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);

        if (info->negotiate_done)
                return 0;

        if (rc == 0)
                rc = -ETIMEDOUT;
        else if (rc == -ERESTARTSYS)
                rc = -EINTR;
        else
                rc = -ENOTCONN;

        return rc;
}

static void put_empty_packet(
                struct smbd_connection *info, struct smbd_response *response)
{
        spin_lock(&info->empty_packet_queue_lock);
        list_add_tail(&response->list, &info->empty_packet_queue);
        info->count_empty_packet_queue++;
        spin_unlock(&info->empty_packet_queue_lock);

        queue_work(info->workqueue, &info->post_send_credits_work);
}

/*
 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
 * This is a queue for reassembling upper layer payload and presenting it to
 * the upper layer. All incoming payload goes to the reassembly queue,
 * regardless of whether reassembly is required. The upper layer code reads
 * from the queue for all incoming payloads.
 * Put a received packet to the reassembly queue
 * response: the packet received
 * data_length: the size of payload in this packet
 */
static void enqueue_reassembly(
                struct smbd_connection *info,
                struct smbd_response *response,
                int data_length)
{
        spin_lock(&info->reassembly_queue_lock);
        list_add_tail(&response->list, &info->reassembly_queue);
        info->reassembly_queue_length++;
        /*
         * Make sure reassembly_data_length is updated after list and
         * reassembly_queue_length are updated. On the dequeue side
         * reassembly_data_length is checked without a lock to determine
         * if reassembly_queue_length and list are up to date
         */
        virt_wmb();
        info->reassembly_data_length += data_length;
        spin_unlock(&info->reassembly_queue_lock);
        info->count_reassembly_queue++;
        info->count_enqueue_reassembly_queue++;
}
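/*
 * The virt_wmb() above orders the list and queue-length updates before the
 * reassembly_data_length update. The dequeue path (not shown in this
 * excerpt) is expected to read reassembly_data_length first and pair it with
 * a read barrier before walking the list, which is why the length can be
 * checked without taking reassembly_queue_lock.
 */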
/*
 * Get the first entry at the front of reassembly queue
 * Caller is responsible for locking
 * return value: the first entry if any, NULL if queue is empty
 */
static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
{
        struct smbd_response *ret = NULL;

        if (!list_empty(&info->reassembly_queue)) {
                ret = list_first_entry(
                        &info->reassembly_queue,
                        struct smbd_response, list);
        }
        return ret;
}

static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info)
{
        struct smbd_response *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
        if (!list_empty(&info->empty_packet_queue)) {
                ret = list_first_entry(
                        &info->empty_packet_queue,
                        struct smbd_response, list);
                list_del(&ret->list);
                info->count_empty_packet_queue--;
        }
        spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);

        return ret;
}

/*
 * Get a receive buffer
 * For each remote send, we need to post a receive. The receive buffers are
 * pre-allocated in advance.
 * return value: the receive buffer, NULL if none is available
 */
static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
{
        struct smbd_response *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&info->receive_queue_lock, flags);
        if (!list_empty(&info->receive_queue)) {
                ret = list_first_entry(
                        &info->receive_queue,
                        struct smbd_response, list);
                list_del(&ret->list);
                info->count_receive_queue--;
                info->count_get_receive_buffer++;
        }
        spin_unlock_irqrestore(&info->receive_queue_lock, flags);

        return ret;
}

/*
 * Return a receive buffer
 * Upon return of a receive buffer, we can post a new receive and extend
 * more receive credits to the remote peer. This is done immediately after
 * a receive buffer is returned.
 */
static void put_receive_buffer(
                struct smbd_connection *info, struct smbd_response *response)
{
        unsigned long flags;

        ib_dma_unmap_single(info->id->device, response->sge.addr,
                            response->sge.length, DMA_FROM_DEVICE);

        spin_lock_irqsave(&info->receive_queue_lock, flags);
        list_add_tail(&response->list, &info->receive_queue);
        info->count_receive_queue++;
        info->count_put_receive_buffer++;
        spin_unlock_irqrestore(&info->receive_queue_lock, flags);

        queue_work(info->workqueue, &info->post_send_credits_work);
}

/* Preallocate all receive buffers on transport establishment */
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
{
        int i;
        struct smbd_response *response;

        INIT_LIST_HEAD(&info->reassembly_queue);
        spin_lock_init(&info->reassembly_queue_lock);
        info->reassembly_data_length = 0;
        info->reassembly_queue_length = 0;

        INIT_LIST_HEAD(&info->receive_queue);
        spin_lock_init(&info->receive_queue_lock);
        info->count_receive_queue = 0;

        INIT_LIST_HEAD(&info->empty_packet_queue);
        spin_lock_init(&info->empty_packet_queue_lock);
        info->count_empty_packet_queue = 0;

        init_waitqueue_head(&info->wait_receive_queues);

        for (i = 0; i < num_buf; i++) {
                response = mempool_alloc(info->response_mempool, GFP_KERNEL);
                if (!response)
                        goto allocate_failed;

                response->info = info;
                list_add_tail(&response->list, &info->receive_queue);
                info->count_receive_queue++;
        }

        return 0;

allocate_failed:
        while (!list_empty(&info->receive_queue)) {
                response = list_first_entry(
                        &info->receive_queue,
                        struct smbd_response, list);
                list_del(&response->list);
                info->count_receive_queue--;

                mempool_free(response, info->response_mempool);
        }
        return -ENOMEM;
}

static void destroy_receive_buffers(struct smbd_connection *info)
{
        struct smbd_response *response;

        while ((response = get_receive_buffer(info)))
                mempool_free(response, info->response_mempool);

        while ((response = get_empty_queue_buffer(info)))
                mempool_free(response, info->response_mempool);
}

/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
static void idle_connection_timer(struct work_struct *work)
{
        struct smbd_connection *info = container_of(
                                        work, struct smbd_connection,
                                        idle_timer_work.work);

        if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
                log_keep_alive(ERR,
                               "error status info->keep_alive_requested=%d\n",
                               info->keep_alive_requested);
                smbd_disconnect_rdma_connection(info);
                return;
        }

        log_keep_alive(INFO, "about to send an empty idle message\n");
        smbd_post_send_empty(info);

        /* Setup the next idle timeout work */
        queue_delayed_work(info->workqueue, &info->idle_timer_work,
                           info->keep_alive_interval*HZ);
}
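/*
 * The idle timer is re-armed by mod_delayed_work() in smbd_post_send() after
 * every successful send, so it only fires after keep_alive_interval seconds
 * without outgoing traffic. The empty message it sends doubles as a credit
 * grant, since smbd_post_send_sgl() attaches any pending new credits to it.
 */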
/*
 * Destroy the transport and related RDMA and memory resources
 * Need to go through all the pending counters and make sure no one is using
 * the transport while it is destroyed
 */
void smbd_destroy(struct TCP_Server_Info *server)
{
        struct smbd_connection *info = server->smbd_conn;
        struct smbd_response *response;
        unsigned long flags;

        if (!info) {
                log_rdma_event(INFO, "rdma session already destroyed\n");
                return;
        }

        log_rdma_event(INFO, "destroying rdma session\n");
        if (info->transport_status != SMBD_DISCONNECTED) {
                rdma_disconnect(server->smbd_conn->id);
                log_rdma_event(INFO, "wait for transport being disconnected\n");
                wait_event_interruptible(
                        info->disconn_wait,
                        info->transport_status == SMBD_DISCONNECTED);
        }

        log_rdma_event(INFO, "destroying qp\n");
        ib_drain_qp(info->id->qp);
        rdma_destroy_qp(info->id);

        log_rdma_event(INFO, "cancelling idle timer\n");
        cancel_delayed_work_sync(&info->idle_timer_work);

        log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
        wait_event(info->wait_send_pending,
                   atomic_read(&info->send_pending) == 0);

        /* It's not possible for upper layer to get to reassembly */
        log_rdma_event(INFO, "drain the reassembly queue\n");
        do {
                spin_lock_irqsave(&info->reassembly_queue_lock, flags);
                response = _get_first_reassembly(info);
                if (response) {
                        list_del(&response->list);
                        spin_unlock_irqrestore(
                                &info->reassembly_queue_lock, flags);
                        put_receive_buffer(info, response);
                } else
                        spin_unlock_irqrestore(
                                &info->reassembly_queue_lock, flags);
        } while (response);
        info->reassembly_data_length = 0;

        log_rdma_event(INFO, "free receive buffers\n");
        wait_event(info->wait_receive_queues,
                   info->count_receive_queue + info->count_empty_packet_queue
                        == info->receive_credit_max);
        destroy_receive_buffers(info);

        /*
         * For performance reasons, memory registration and deregistration
         * are not locked by srv_mutex. It is possible some processes are
         * blocked on transport srv_mutex while holding memory registration.
         * Release the transport srv_mutex to allow them to hit the failure
         * path when sending data, and then release memory registrations.
         */
        log_rdma_event(INFO, "freeing mr list\n");
        wake_up_interruptible_all(&info->wait_mr);
        while (atomic_read(&info->mr_used_count)) {
                mutex_unlock(&server->srv_mutex);
                msleep(1000);
                mutex_lock(&server->srv_mutex);
        }
        destroy_mr_list(info);

        ib_free_cq(info->send_cq);
        ib_free_cq(info->recv_cq);
        ib_dealloc_pd(info->pd);
        rdma_destroy_id(info->id);

        /* free mempools */
        mempool_destroy(info->request_mempool);
        kmem_cache_destroy(info->request_cache);

        mempool_destroy(info->response_mempool);
        kmem_cache_destroy(info->response_cache);

        info->transport_status = SMBD_DESTROYED;

        destroy_workqueue(info->workqueue);
        log_rdma_event(INFO, "rdma session destroyed\n");
        kfree(info);
}
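/*
 * Teardown order matters here: disconnect and drain the QP so no new
 * completions arrive, cancel the idle timer, wait for in-flight sends
 * (send_pending) to complete, drain the reassembly queue back into the
 * receive pool, wait until every receive buffer has been returned, release
 * MRs (dropping srv_mutex so stuck senders can fail out), and only then free
 * the CQs, PD, CM id, mempools and workqueue.
 */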
/*
 * Reconnect this SMBD connection, called from upper layer
 * return value: 0 on success, or actual error code
 */
int smbd_reconnect(struct TCP_Server_Info *server)
{
	log_rdma_event(INFO, "reconnecting rdma session\n");

	if (!server->smbd_conn) {
		log_rdma_event(INFO, "rdma session already destroyed\n");
		goto create_conn;
	}

	/*
	 * This is possible if transport is disconnected and we haven't received
	 * notification from RDMA, but upper layer has detected timeout
	 */
	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
		log_rdma_event(INFO, "disconnecting transport\n");
		smbd_destroy(server);
	}

create_conn:
	log_rdma_event(INFO, "creating rdma session\n");
	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);

	if (server->smbd_conn)
		cifs_dbg(VFS, "RDMA transport re-established\n");

	return server->smbd_conn ? 0 : -ENOENT;
}
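/*
 * Illustrative caller sketch (not part of this file): an upper-layer
 * reconnect path for an RDMA-mounted share is expected to call the
 * function above roughly as follows (the surrounding code and the
 * "retry" label are hypothetical):
 *
 *	if (server->rdma) {
 *		rc = smbd_reconnect(server);
 *		if (rc)
 *			goto retry;
 *	}
 */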
static void destroy_caches_and_workqueue(struct smbd_connection *info)
{
	destroy_receive_buffers(info);
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
}

#define MAX_NAME_LEN	80
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
	char name[MAX_NAME_LEN];
	int rc;

	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
	info->request_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_request) +
				sizeof(struct smbd_data_transfer),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
	info->response_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_response) +
				info->max_receive_size,
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
			mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);
	if (rc) {
		log_rdma_event(ERR, "failed to allocate receive buffers\n");
		goto out5;
	}

	return 0;
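	/*
	 * Error unwind: each label below releases exactly what was set up
	 * before the corresponding failure point, in reverse order of
	 * allocation above.
	 */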
out5:
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
	return -ENOMEM;
}
/* Create a SMBD connection, called by upper layer */
static struct smbd_connection *_smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
{
	int rc;
	struct smbd_connection *info;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
	struct ib_port_immutable port_immutable;
	u32 ird_ord_hdr[2];

	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
	if (!info)
		return NULL;

	info->transport_status = SMBD_CONNECTING;
	rc = smbd_ia_open(info, dstaddr, port);
	if (rc) {
		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
		goto create_id_failed;
	}

	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
			       smbd_send_credit_target,
			       info->id->device->attrs.max_cqe,
			       info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
			       smbd_receive_credit_max,
			       info->id->device->attrs.max_cqe,
			       info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	info->receive_credit_max = smbd_receive_credit_max;
	info->send_credit_target = smbd_send_credit_target;
	info->max_send_size = smbd_max_send_size;
	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
	info->max_receive_size = smbd_max_receive_size;
	info->keep_alive_interval = smbd_keep_alive_interval;

	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR,
			"warning: device max_send_sge = %d too small\n",
			info->id->device->attrs.max_send_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}
	if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR,
			"warning: device max_recv_sge = %d too small\n",
			info->id->device->attrs.max_recv_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}

	info->send_cq = NULL;
	info->recv_cq = NULL;
	info->send_cq =
		ib_alloc_cq_any(info->id->device, info,
				info->send_credit_target, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->send_cq)) {
		info->send_cq = NULL;
		goto alloc_cq_failed;
	}

	info->recv_cq =
		ib_alloc_cq_any(info->id->device, info,
				info->receive_credit_max, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->recv_cq)) {
		info->recv_cq = NULL;
		goto alloc_cq_failed;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smbd_qp_async_error_upcall;
	qp_attr.qp_context = info;
	qp_attr.cap.max_send_wr = info->send_credit_target;
	qp_attr.cap.max_recv_wr = info->receive_credit_max;
	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_inline_data = 0;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = info->send_cq;
	qp_attr.recv_cq = info->recv_cq;
	qp_attr.port_num = ~0;

	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
	if (rc) {
		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
		goto create_qp_failed;
	}

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = 0;

	conn_param.responder_resources =
		info->id->device->attrs.max_qp_rd_atom
			< SMBD_CM_RESPONDER_RESOURCES ?
		info->id->device->attrs.max_qp_rd_atom :
		SMBD_CM_RESPONDER_RESOURCES;
	info->responder_resources = conn_param.responder_resources;
	log_rdma_mr(INFO, "responder_resources=%d\n",
		info->responder_resources);

	/* Need to send IRD/ORD in private data for iWARP */
	info->id->device->ops.get_port_immutable(
		info->id->device, info->id->port_num, &port_immutable);
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = info->responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}

	conn_param.retry_count = SMBD_CM_RETRY;
	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
		&addr_in->sin_addr, port);

	init_waitqueue_head(&info->conn_wait);
	init_waitqueue_head(&info->disconn_wait);
	init_waitqueue_head(&info->wait_reassembly_queue);
	rc = rdma_connect(info->id, &conn_param);
	if (rc) {
		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
		goto rdma_connect_failed;
	}

	wait_event_interruptible(
		info->conn_wait, info->transport_status != SMBD_CONNECTING);

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
		goto rdma_connect_failed;
	}

	log_rdma_event(INFO, "rdma_connect connected\n");

	rc = allocate_caches_and_workqueue(info);
	if (rc) {
		log_rdma_event(ERR, "cache allocation failed\n");
		goto allocate_cache_failed;
	}

	init_waitqueue_head(&info->wait_send_queue);
	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);

	init_waitqueue_head(&info->wait_send_pending);
	atomic_set(&info->send_pending, 0);

	init_waitqueue_head(&info->wait_post_send);

	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
	info->new_credits_offered = 0;
	spin_lock_init(&info->lock_new_credits_offered);

	rc = smbd_negotiate(info);
	if (rc) {
		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
		goto negotiation_failed;
	}

	rc = allocate_mr_list(info);
	if (rc) {
		log_rdma_mr(ERR, "memory registration allocation failed\n");
		goto allocate_mr_failed;
	}

	return info;
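	/*
	 * Error unwind: the labels below tear down connection state roughly
	 * in reverse order of the setup above, so each failure point only
	 * releases what has actually been initialized by that point.
	 */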
allocate_mr_failed:
	/* At this point, need a full transport shutdown */
	smbd_destroy(server);
	return NULL;

negotiation_failed:
	cancel_delayed_work_sync(&info->idle_timer_work);
	destroy_caches_and_workqueue(info);
	info->transport_status = SMBD_NEGOTIATE_FAILED;
	init_waitqueue_head(&info->conn_wait);
	rdma_disconnect(info->id);
	wait_event(info->conn_wait,
		info->transport_status == SMBD_DISCONNECTED);

allocate_cache_failed:
rdma_connect_failed:
	rdma_destroy_qp(info->id);

create_qp_failed:
alloc_cq_failed:
	if (info->send_cq)
		ib_free_cq(info->send_cq);
	if (info->recv_cq)
		ib_free_cq(info->recv_cq);

config_failed:
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

create_id_failed:
	kfree(info);
	return NULL;
}
struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
{
	struct smbd_connection *ret;
	int port = SMBD_PORT;

try_again:
	ret = _smbd_get_connection(server, dstaddr, port);

	/* Try SMB_PORT if SMBD_PORT doesn't work */
	if (!ret && port == SMBD_PORT) {
		port = SMB_PORT;
		goto try_again;
	}
	return ret;
}
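/*
 * Note (informational): SMBD_PORT and SMB_PORT are defined elsewhere in the
 * CIFS/smbdirect headers; SMBD_PORT is the dedicated SMB Direct port (5445)
 * and SMB_PORT is the traditional SMB port (445), hence the fallback above.
 */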
/*
 * Receive data from receive reassembly queue
 * All the incoming data packets are placed in reassembly queue
 * buf: the buffer to read data into
 * size: the length of data to read
 * return value: actual data read
 * Note: this implementation copies the data from reassembly queue to receive
 * buffers used by upper layer. This is not the optimal code path. A better way
 * to do it is to not have upper layer allocate its receive buffers but rather
 * borrow the buffer from reassembly queue, and return it after data is
 * consumed. But this will require more changes to upper layer code, and also
 * need to consider packet boundaries while they are still being reassembled.
 */
static int smbd_recv_buf(struct smbd_connection *info, char *buf,
		unsigned int size)
{
	struct smbd_response *response;
	struct smbd_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;

again:
	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time
	 */
	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
		info->reassembly_data_length);
	if (info->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * _get_first_reassembly. This call is lock free
		 * as we never read at the end of the queue, which is being
		 * updated in SOFTIRQ context as more data is received
		 */
		virt_rmb();
		queue_length = info->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = info->first_entry_offset;
		while (data_read < size) {
			response = _get_first_reassembly(info);
			data_transfer = smbd_response_payload(response);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(
					data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes the
			 * change to the upper layer packet processing logic.
			 * This will eventually be removed when an
			 * intermediate transport layer is added
			 */
			if (response->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				response->first_segment = false;
				log_read(INFO, "returning rfc1002 length %d\n",
					rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(
				buf + data_read,
				(char *)data_transfer + data_offset + offset,
				to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length)
					list_del(&response->list);
				else {
					spin_lock_irq(
						&info->reassembly_queue_lock);
					list_del(&response->list);
					spin_unlock_irq(
						&info->reassembly_queue_lock);
				}
				queue_removed++;
				info->count_reassembly_queue--;
				info->count_dequeue_reassembly_queue++;
				put_receive_buffer(info, response);
				offset = 0;
				log_read(INFO, "put_receive_buffer offset=0\n");
			} else
				offset += to_copy;

			to_read -= to_copy;
			data_read += to_copy;

			log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
				to_copy, data_length - offset,
				to_read, data_read, offset);
		}

		spin_lock_irq(&info->reassembly_queue_lock);
		info->reassembly_data_length -= data_read;
		info->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&info->reassembly_queue_lock);

		info->first_entry_offset = offset;
		log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
			data_read, info->reassembly_data_length,
			info->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	log_read(INFO, "wait_event on more data\n");
	rc = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= size ||
			info->transport_status != SMBD_CONNECTED);
	/* Don't return any data if interrupted */
	if (rc)
		return rc;

	if (info->transport_status != SMBD_CONNECTED) {
		log_read(ERR, "disconnected\n");
		return -ECONNABORTED;
	}

	goto again;
}
/*
 * Receive a page from receive reassembly queue
 * page: the page to read data into
 * to_read: the length of data to read
 * return value: actual data read
 */
static int smbd_recv_page(struct smbd_connection *info,
		struct page *page, unsigned int page_offset,
		unsigned int to_read)
{
	int ret;
	char *to_address;
	void *page_address;

	/* make sure we have the page ready for read */
	ret = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= to_read ||
			info->transport_status != SMBD_CONNECTED);
	if (ret)
		return ret;

	/* now we can read from reassembly queue and not sleep */
	page_address = kmap_atomic(page);
	to_address = (char *) page_address + page_offset;

	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
		page, to_address, to_read);

	ret = smbd_recv_buf(info, to_address, to_read);
	kunmap_atomic(page_address);

	return ret;
}

/*
 * Receive data from transport
 * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
 * return: total bytes read, or 0. SMB Direct will not do partial read.
 */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
	char *buf;
	struct page *page;
	unsigned int to_read, page_offset;
	int rc;

	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
		/* It's a bug in upper layer to get there */
		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
			 iov_iter_rw(&msg->msg_iter));
		rc = -EINVAL;
		goto out;
	}

	switch (iov_iter_type(&msg->msg_iter)) {
	case ITER_KVEC:
		buf = msg->msg_iter.kvec->iov_base;
		to_read = msg->msg_iter.kvec->iov_len;
		rc = smbd_recv_buf(info, buf, to_read);
		break;

	case ITER_BVEC:
		page = msg->msg_iter.bvec->bv_page;
		page_offset = msg->msg_iter.bvec->bv_offset;
		to_read = msg->msg_iter.bvec->bv_len;
		rc = smbd_recv_page(info, page, page_offset, to_read);
		break;

	default:
		/* It's a bug in upper layer to get there */
		cifs_dbg(VFS, "Invalid msg type %d\n",
			 iov_iter_type(&msg->msg_iter));
		rc = -EINVAL;
	}

out:
	/* SMBDirect will read it all or nothing */
	if (rc > 0)
		msg->msg_iter.count = 0;
	return rc;
}
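/*
 * Illustrative caller sketch (not part of this file): the upper layer is
 * expected to read the 4-byte RFC1002 length first and then the body, e.g.
 * roughly as below (local variable names are hypothetical):
 *
 *	struct msghdr msg = {};
 *	struct kvec iov = { .iov_base = buf, .iov_len = 4 };
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
 *	rc = smbd_recv(server->smbd_conn, &msg);
 *
 * smbd_recv() either satisfies the full requested length or returns an
 * error; partial reads are not returned to the caller.
 */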
/*
 * Send data to transport
 * Each rqst is transported as a SMBDirect payload
 * rqst: the data to write
 * return value: 0 if successfully written, otherwise error code
 */
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst_array)
{
	struct smbd_connection *info = server->smbd_conn;
	struct kvec vec;
	int nvecs;
	int size;
	unsigned int buflen, remaining_data_length;
	int start, i, j;
	int max_iov_size =
		info->max_send_size - sizeof(struct smbd_data_transfer);
	struct kvec *iov;
	int rc;
	struct smb_rqst *rqst;
	int rqst_idx;

	if (info->transport_status != SMBD_CONNECTED) {
		rc = -EAGAIN;
		goto done;
	}

	/*
	 * Add in the page array if there is one. The caller needs to set
	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
	 * ends at page boundary
	 */
	remaining_data_length = 0;
	for (i = 0; i < num_rqst; i++)
		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);

	if (remaining_data_length > info->max_fragmented_send_size) {
		log_write(ERR, "payload size %d > max size %d\n",
			remaining_data_length, info->max_fragmented_send_size);
		rc = -EINVAL;
		goto done;
	}

	log_write(INFO, "num_rqst=%d total length=%u\n",
			num_rqst, remaining_data_length);

	rqst_idx = 0;
next_rqst:
	rqst = &rqst_array[rqst_idx];
	iov = rqst->rq_iov;

	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
		rqst_idx, smb_rqst_len(server, rqst));
	for (i = 0; i < rqst->rq_nvec; i++)
		dump_smb(iov[i].iov_base, iov[i].iov_len);

	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
		rqst->rq_tailsz, smb_rqst_len(server, rqst));

	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen-iov[i].iov_len);
				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
					start, i, i - start,
					remaining_data_length);
				rc = smbd_post_send_data(
					info, &iov[start], i-start,
					remaining_data_length);
				if (rc)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				nvecs = (buflen+max_iov_size-1)/max_iov_size;
				log_write(INFO, "iov[%d] iov_base=%p buflen=%d break to %d vectors\n",
					start, iov[start].iov_base,
					buflen, nvecs);
				for (j = 0; j < nvecs; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j*max_iov_size;
					vec.iov_len = max_iov_size;
					if (j == nvecs-1)
						vec.iov_len =
							buflen -
							max_iov_size*(nvecs-1);
					remaining_data_length -= vec.iov_len;
					log_write(INFO,
						"sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
						  j, vec.iov_base, vec.iov_len,
						  remaining_data_length);
					rc = smbd_post_send_data(
						info, &vec, 1,
						remaining_data_length);
					if (rc)
						goto done;
				}
				i++;
				if (i == rqst->rq_nvec)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == rqst->rq_nvec) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
					start, i, i - start,
					remaining_data_length);
				rc = smbd_post_send_data(info, &iov[start],
					i-start, remaining_data_length);
				if (rc)
					goto done;
				break;
			}
		}
		log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
	}

	/* now sending pages if there are any */
	for (i = 0; i < rqst->rq_npages; i++) {
		unsigned int offset;

		rqst_page_get_length(rqst, i, &buflen, &offset);
		nvecs = (buflen + max_iov_size - 1) / max_iov_size;
		log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
			buflen, nvecs);
		for (j = 0; j < nvecs; j++) {
			size = max_iov_size;
			if (j == nvecs-1)
				size = buflen - j*max_iov_size;
			remaining_data_length -= size;
			log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
				  i, j * max_iov_size + offset, size,
				  remaining_data_length);
			rc = smbd_post_send_page(
				info, rqst->rq_pages[i],
				j*max_iov_size + offset,
				size, remaining_data_length);
			if (rc)
				goto done;
		}
	}

	rqst_idx++;
	if (rqst_idx < num_rqst)
		goto next_rqst;

done:
	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one.
	 * Send them all and wait for the pending send count to get to 0,
	 * which means all the I/Os have gone out and we are good to return
	 */
	wait_event(info->wait_send_pending,
		atomic_read(&info->send_pending) == 0);

	return rc;
}
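/*
 * Splitting example (illustrative): each posted send carries at most
 * max_iov_size = max_send_size - sizeof(struct smbd_data_transfer) bytes of
 * payload, so a single iovec of length L is broken into
 * DIV_ROUND_UP(L, max_iov_size) sends, with only the last piece shorter
 * than max_iov_size. remaining_data_length is decremented as each piece is
 * posted, so the peer can tell how much of the fragmented payload is still
 * to come.
 */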
static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_mr *mr;
	struct ib_cqe *cqe;

	if (wc->status) {
		log_rdma_mr(ERR, "status=%d\n", wc->status);
		cqe = wc->wr_cqe;
		mr = container_of(cqe, struct smbd_mr, cqe);
		smbd_disconnect_rdma_connection(mr->conn);
	}
}

/*
 * The work queue function that recovers MRs
 * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
 * again. Both calls are slow, so finish them in a workqueue. This will not
 * block I/O path.
 * There is one workqueue that recovers MRs, so there is no need to lock as
 * the I/O requests calling smbd_register_mr will never update the links in
 * the mr_list.
 */
static void smbd_mr_recovery_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, mr_recovery_work);
	struct smbd_mr *smbdirect_mr;
	int rc;

	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
		if (smbdirect_mr->state == MR_ERROR) {

			/* recover this MR entry */
			rc = ib_dereg_mr(smbdirect_mr->mr);
			if (rc) {
				log_rdma_mr(ERR,
					"ib_dereg_mr failed rc=%x\n",
					rc);
				smbd_disconnect_rdma_connection(info);
				continue;
			}

			smbdirect_mr->mr = ib_alloc_mr(
				info->pd, info->mr_type,
				info->max_frmr_depth);
			if (IS_ERR(smbdirect_mr->mr)) {
				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
					info->mr_type,
					info->max_frmr_depth);
				smbd_disconnect_rdma_connection(info);
				continue;
			}
		} else
			/* This MR is being used, don't recover it */
			continue;

		smbdirect_mr->state = MR_READY;

		/* smbdirect_mr->state is updated by this function
		 * and is read and updated by I/O issuing CPUs trying
		 * to get a MR, the call to atomic_inc_return
		 * implies a memory barrier and guarantees this
		 * value is updated before waking up any calls to
		 * get_mr() from the I/O issuing CPUs
		 */
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	}
}

static void destroy_mr_list(struct smbd_connection *info)
{
	struct smbd_mr *mr, *tmp;

	cancel_work_sync(&info->mr_recovery_work);
	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
		if (mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(info->id->device, mr->sgl,
				mr->sgl_count, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgl);
		kfree(mr);
	}
}

/*
 * Allocate MRs used for RDMA read/write
 * The number of MRs will not exceed hardware capability in responder_resources
 * All MRs are kept in mr_list. The MR can be recovered after it's used
 * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
 * as MRs are used and recovered for I/O, but the list links will not change
 */
static int allocate_mr_list(struct smbd_connection *info)
{
	int i;
	struct smbd_mr *smbdirect_mr, *tmp;

	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
	init_waitqueue_head(&info->wait_for_mr_cleanup);
	/* Allocate more MRs (2x) than hardware responder_resources */
	for (i = 0; i < info->responder_resources * 2; i++) {
		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
		if (!smbdirect_mr)
			goto out;
		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
					info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
				    info->mr_type, info->max_frmr_depth);
			goto out;
		}
		smbdirect_mr->sgl = kcalloc(
					info->max_frmr_depth,
					sizeof(struct scatterlist),
					GFP_KERNEL);
		if (!smbdirect_mr->sgl) {
			log_rdma_mr(ERR, "failed to allocate sgl\n");
			ib_dereg_mr(smbdirect_mr->mr);
			goto out;
		}
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;

		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
	}
	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
	return 0;

out:
	kfree(smbdirect_mr);

	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
		ib_dereg_mr(smbdirect_mr->mr);
		kfree(smbdirect_mr->sgl);
		kfree(smbdirect_mr);
	}
	return -ENOMEM;
}
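/*
 * Sizing example (illustrative): if responder_resources was negotiated to,
 * say, 16, the loop above creates 32 MRs, each backed by a scatterlist that
 * can describe up to max_frmr_depth pages, so up to 32 RDMA read/write
 * registrations can be outstanding at once before get_mr() has to wait.
 */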
/*
 * Get a MR from mr_list. This function waits until there is at least one
 * MR available in the list. It may access the list while the
 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
 * as they never modify the same places. However, there may be several CPUs
 * issuing I/O trying to get a MR at the same time; mr_list_lock is used to
 * protect against that.
 */
static struct smbd_mr *get_mr(struct smbd_connection *info)
{
	struct smbd_mr *ret;
	int rc;
again:
	rc = wait_event_interruptible(info->wait_mr,
		atomic_read(&info->mr_ready_count) ||
		info->transport_status != SMBD_CONNECTED);
	if (rc) {
		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
		return NULL;
	}

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_mr(ERR, "info->transport_status=%x\n",
			info->transport_status);
		return NULL;
	}

	spin_lock(&info->mr_list_lock);
	list_for_each_entry(ret, &info->mr_list, list) {
		if (ret->state == MR_READY) {
			ret->state = MR_REGISTERED;
			spin_unlock(&info->mr_list_lock);
			atomic_dec(&info->mr_ready_count);
			atomic_inc(&info->mr_used_count);
			return ret;
		}
	}

	spin_unlock(&info->mr_list_lock);
	/*
	 * It is possible that we could fail to get a MR because other
	 * processes may try to acquire a MR at the same time. If this is the
	 * case, retry it.
	 */
	goto again;
}
/*
 * Register memory for RDMA read/write
 * pages[]: the list of pages to register memory with
 * num_pages: the number of pages to register
 * tailsz: if non-zero, the bytes to register in the last page
 * writing: true if this is a RDMA write (SMB read), false for RDMA read
 * need_invalidate: true if this MR needs to be locally invalidated after I/O
 * return value: the MR registered, NULL if failed.
 */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate)
{
	struct smbd_mr *smbdirect_mr;
	int rc, i;
	enum dma_data_direction dir;
	struct ib_reg_wr *reg_wr;

	if (num_pages > info->max_frmr_depth) {
		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
			num_pages, info->max_frmr_depth);
		return NULL;
	}

	smbdirect_mr = get_mr(info);
	if (!smbdirect_mr) {
		log_rdma_mr(ERR, "get_mr returning NULL\n");
		return NULL;
	}
	smbdirect_mr->need_invalidate = need_invalidate;
	smbdirect_mr->sgl_count = num_pages;
	sg_init_table(smbdirect_mr->sgl, num_pages);

	log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
			num_pages, offset, tailsz);

	if (num_pages == 1) {
		sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
		goto skip_multiple_pages;
	}

	/* We have at least two pages to register */
	sg_set_page(
		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
	i = 1;
	while (i < num_pages - 1) {
		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
		i++;
	}
	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
		tailsz ? tailsz : PAGE_SIZE, 0);

skip_multiple_pages:
	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	smbdirect_mr->dir = dir;
	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
	if (!rc) {
		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
			num_pages, dir, rc);
		goto dma_map_error;
	}

	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
		NULL, PAGE_SIZE);
	if (rc != num_pages) {
		log_rdma_mr(ERR,
			"ib_map_mr_sg failed rc = %d num_pages = %x\n",
			rc, num_pages);
		goto map_mr_error;
	}

	ib_update_fast_reg_key(smbdirect_mr->mr,
		ib_inc_rkey(smbdirect_mr->mr->rkey));
	reg_wr = &smbdirect_mr->wr;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	smbdirect_mr->cqe.done = register_mr_done;
	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
	reg_wr->mr = smbdirect_mr->mr;
	reg_wr->key = smbdirect_mr->mr->rkey;
	reg_wr->access = writing ?
			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			IB_ACCESS_REMOTE_READ;

	/*
	 * There is no need to wait for completion of ib_post_send
	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
	 * on the next ib_post_send when we actually send I/O to the remote peer
	 */
	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
	if (!rc)
		return smbdirect_mr;

	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
		rc, reg_wr->key);

	/* If all failed, attempt to recover this MR by setting it to MR_ERROR */
map_mr_error:
	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
		smbdirect_mr->sgl_count, smbdirect_mr->dir);

dma_map_error:
	smbdirect_mr->state = MR_ERROR;
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	smbd_disconnect_rdma_connection(info);

	return NULL;
}
static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_mr *smbdirect_mr;
	struct ib_cqe *cqe;

	cqe = wc->wr_cqe;
	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
	smbdirect_mr->state = MR_INVALIDATED;
	if (wc->status != IB_WC_SUCCESS) {
		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
		smbdirect_mr->state = MR_ERROR;
	}
	complete(&smbdirect_mr->invalidate_done);
}

/*
 * Deregister a MR after I/O is done
 * This function may wait if remote invalidation is not used
 * and we have to locally invalidate the buffer to prevent the data from being
 * modified by the remote peer after the upper layer consumes it
 */
int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
	struct ib_send_wr *wr;
	struct smbd_connection *info = smbdirect_mr->conn;
	int rc = 0;

	if (smbdirect_mr->need_invalidate) {
		/* Need to finish local invalidation before returning */
		wr = &smbdirect_mr->inv_wr;
		wr->opcode = IB_WR_LOCAL_INV;
		smbdirect_mr->cqe.done = local_inv_done;
		wr->wr_cqe = &smbdirect_mr->cqe;
		wr->num_sge = 0;
		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
		wr->send_flags = IB_SEND_SIGNALED;

		init_completion(&smbdirect_mr->invalidate_done);
		rc = ib_post_send(info->id->qp, wr, NULL);
		if (rc) {
			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
			smbd_disconnect_rdma_connection(info);
			goto done;
		}
		wait_for_completion(&smbdirect_mr->invalidate_done);
		smbdirect_mr->need_invalidate = false;
	} else
		/*
		 * For remote invalidation, just set it to MR_INVALIDATED
		 * and defer to mr_recovery_work to recover the MR for next use
		 */
		smbdirect_mr->state = MR_INVALIDATED;

	if (smbdirect_mr->state == MR_INVALIDATED) {
		ib_dma_unmap_sg(
			info->id->device, smbdirect_mr->sgl,
			smbdirect_mr->sgl_count,
			smbdirect_mr->dir);
		smbdirect_mr->state = MR_READY;
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	} else
		/*
		 * Schedule the work to do MR recovery for future I/Os. MR
		 * recovery is slow and we don't want it to block the
		 * current I/O.
		 */
		queue_work(info->workqueue, &info->mr_recovery_work);

done:
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	return rc;
}
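/*
 * Illustrative lifecycle sketch (not part of this file): for an RDMA read
 * or write, the upper layer is expected to pair the two calls above roughly
 * as follows (variable names are hypothetical):
 *
 *	mr = smbd_register_mr(server->smbd_conn, pages, npages, offset,
 *			      tailsz, writing, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *	// advertise mr->mr->rkey and the registered range to the peer in the
 *	// SMB2 request so the server can RDMA read/write the buffer directly
 *	...
 *	rc = smbd_deregister_mr(mr);	// after the server's response arrives
 */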