smc_core.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Shared Memory Communications over RDMA (SMC-R) and RoCE
  4. *
  5. * Basic Transport Functions exploiting Infiniband API
  6. *
  7. * Copyright IBM Corp. 2016
  8. *
  9. * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
  10. */
  11. #include <linux/socket.h>
  12. #include <linux/if_vlan.h>
  13. #include <linux/random.h>
  14. #include <linux/workqueue.h>
  15. #include <linux/wait.h>
  16. #include <linux/reboot.h>
  17. #include <linux/mutex.h>
  18. #include <net/tcp.h>
  19. #include <net/sock.h>
  20. #include <rdma/ib_verbs.h>
  21. #include <rdma/ib_cache.h>
  22. #include "smc.h"
  23. #include "smc_clc.h"
  24. #include "smc_core.h"
  25. #include "smc_ib.h"
  26. #include "smc_wr.h"
  27. #include "smc_llc.h"
  28. #include "smc_cdc.h"
  29. #include "smc_close.h"
  30. #include "smc_ism.h"
  31. #define SMC_LGR_NUM_INCR 256
  32. #define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
  33. #define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
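/* With HZ-based jiffies, SMC_LGR_FREE_DELAY_SERV corresponds to 600 seconds
 * and SMC_LGR_FREE_DELAY_CLNT to 610 seconds, so a client link group is kept
 * around 10 seconds longer than the matching server link group.
 */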
  34. static struct smc_lgr_list smc_lgr_list = { /* established link groups */
  35. .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
  36. .list = LIST_HEAD_INIT(smc_lgr_list.list),
  37. .num = 0,
  38. };
  39. static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
  40. static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
  41. static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
  42. struct smc_buf_desc *buf_desc);
  43. static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
  44. static void smc_link_down_work(struct work_struct *work);
  45. /* return head of link group list and its lock for a given link group */
  46. static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
  47. spinlock_t **lgr_lock)
  48. {
  49. if (lgr->is_smcd) {
  50. *lgr_lock = &lgr->smcd->lgr_lock;
  51. return &lgr->smcd->lgr_list;
  52. }
  53. *lgr_lock = &smc_lgr_list.lock;
  54. return &smc_lgr_list.list;
  55. }
  56. static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
  57. {
  58. /* client link group creation always follows the server link group
  59. * creation. For client use a somewhat higher removal delay time,
  60. * otherwise there is a risk of out-of-sync link groups.
  61. */
  62. if (!lgr->freeing) {
  63. mod_delayed_work(system_wq, &lgr->free_work,
  64. (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
  65. SMC_LGR_FREE_DELAY_CLNT :
  66. SMC_LGR_FREE_DELAY_SERV);
  67. }
  68. }
  69. /* Register connection's alert token in our lookup structure.
  70. * To use rbtrees we have to implement our own insert core.
  71. * Requires @conns_lock
  72. * @smc connection to register
  73. * Returns 0 on success, != 0 otherwise.
  74. */
  75. static void smc_lgr_add_alert_token(struct smc_connection *conn)
  76. {
  77. struct rb_node **link, *parent = NULL;
  78. u32 token = conn->alert_token_local;
  79. link = &conn->lgr->conns_all.rb_node;
  80. while (*link) {
  81. struct smc_connection *cur = rb_entry(*link,
  82. struct smc_connection, alert_node);
  83. parent = *link;
  84. if (cur->alert_token_local > token)
  85. link = &parent->rb_left;
  86. else
  87. link = &parent->rb_right;
  88. }
  89. /* Put the new node there */
  90. rb_link_node(&conn->alert_node, parent, link);
  91. rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
  92. }
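/* Illustrative sketch (assumption, not taken from this file) of the matching
 * lookup, cf. smc_lgr_find_conn(), assuming the same alert_node /
 * alert_token_local fields used by smc_lgr_add_alert_token() above:
 *
 *	struct rb_node *node = lgr->conns_all.rb_node;
 *
 *	while (node) {
 *		struct smc_connection *cur =
 *			rb_entry(node, struct smc_connection, alert_node);
 *
 *		if (cur->alert_token_local > token)
 *			node = node->rb_left;
 *		else if (cur->alert_token_local < token)
 *			node = node->rb_right;
 *		else
 *			return cur;
 *	}
 *	return NULL;
 */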
  93. /* assign an SMC-R link to the connection */
  94. static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
  95. {
  96. enum smc_link_state expected = first ? SMC_LNK_ACTIVATING :
  97. SMC_LNK_ACTIVE;
  98. int i, j;
  99. /* do link balancing */
  100. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  101. struct smc_link *lnk = &conn->lgr->lnk[i];
  102. if (lnk->state != expected || lnk->link_is_asym)
  103. continue;
  104. if (conn->lgr->role == SMC_CLNT) {
  105. conn->lnk = lnk; /* temporary, SMC server assigns link*/
  106. break;
  107. }
  108. if (conn->lgr->conns_num % 2) {
  109. for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
  110. struct smc_link *lnk2;
  111. lnk2 = &conn->lgr->lnk[j];
  112. if (lnk2->state == expected &&
  113. !lnk2->link_is_asym) {
  114. conn->lnk = lnk2;
  115. break;
  116. }
  117. }
  118. }
  119. if (!conn->lnk)
  120. conn->lnk = lnk;
  121. break;
  122. }
  123. if (!conn->lnk)
  124. return SMC_CLC_DECL_NOACTLINK;
  125. return 0;
  126. }
  127. /* Register connection in link group by assigning an alert token
  128. * registered in a search tree.
  129. * Requires @conns_lock
  130. * Note that '0' is a reserved value and not assigned.
  131. */
  132. static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
  133. {
  134. struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
  135. static atomic_t nexttoken = ATOMIC_INIT(0);
  136. int rc;
  137. if (!conn->lgr->is_smcd) {
  138. rc = smcr_lgr_conn_assign_link(conn, first);
  139. if (rc)
  140. return rc;
  141. }
  142. /* find a new alert_token_local value not yet used by some connection
  143. * in this link group
  144. */
  145. sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
  146. while (!conn->alert_token_local) {
  147. conn->alert_token_local = atomic_inc_return(&nexttoken);
  148. if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
  149. conn->alert_token_local = 0;
  150. }
  151. smc_lgr_add_alert_token(conn);
  152. conn->lgr->conns_num++;
  153. return 0;
  154. }
  155. /* Unregister connection and reset the alert token of the given connection
  156. */
  157. static void __smc_lgr_unregister_conn(struct smc_connection *conn)
  158. {
  159. struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
  160. struct smc_link_group *lgr = conn->lgr;
  161. rb_erase(&conn->alert_node, &lgr->conns_all);
  162. lgr->conns_num--;
  163. conn->alert_token_local = 0;
  164. sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
  165. }
  166. /* Unregister connection from lgr
  167. */
  168. static void smc_lgr_unregister_conn(struct smc_connection *conn)
  169. {
  170. struct smc_link_group *lgr = conn->lgr;
  171. if (!lgr)
  172. return;
  173. write_lock_bh(&lgr->conns_lock);
  174. if (conn->alert_token_local) {
  175. __smc_lgr_unregister_conn(conn);
  176. }
  177. write_unlock_bh(&lgr->conns_lock);
  178. conn->lgr = NULL;
  179. }
  180. void smc_lgr_cleanup_early(struct smc_connection *conn)
  181. {
  182. struct smc_link_group *lgr = conn->lgr;
  183. spinlock_t *lgr_lock;
  184. if (!lgr)
  185. return;
  186. smc_conn_free(conn);
  187. smc_lgr_list_head(lgr, &lgr_lock);
  188. spin_lock_bh(lgr_lock);
  189. /* do not use this link group for new connections */
  190. if (!list_empty(&lgr->list))
  191. list_del_init(&lgr->list);
  192. spin_unlock_bh(lgr_lock);
  193. __smc_lgr_terminate(lgr, true);
  194. }
  195. static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
  196. {
  197. int i;
  198. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  199. struct smc_link *lnk = &lgr->lnk[i];
  200. if (smc_link_sendable(lnk))
  201. lnk->state = SMC_LNK_INACTIVE;
  202. }
  203. wake_up_all(&lgr->llc_msg_waiter);
  204. wake_up_all(&lgr->llc_flow_waiter);
  205. }
  206. static void smc_lgr_free(struct smc_link_group *lgr);
  207. static void smc_lgr_free_work(struct work_struct *work)
  208. {
  209. struct smc_link_group *lgr = container_of(to_delayed_work(work),
  210. struct smc_link_group,
  211. free_work);
  212. spinlock_t *lgr_lock;
  213. bool conns;
  214. smc_lgr_list_head(lgr, &lgr_lock);
  215. spin_lock_bh(lgr_lock);
  216. if (lgr->freeing) {
  217. spin_unlock_bh(lgr_lock);
  218. return;
  219. }
  220. read_lock_bh(&lgr->conns_lock);
  221. conns = RB_EMPTY_ROOT(&lgr->conns_all);
  222. read_unlock_bh(&lgr->conns_lock);
  223. if (!conns) { /* number of lgr connections is no longer zero */
  224. spin_unlock_bh(lgr_lock);
  225. return;
  226. }
  227. list_del_init(&lgr->list); /* remove from smc_lgr_list */
  228. lgr->freeing = 1; /* this instance does the freeing, no new schedule */
  229. spin_unlock_bh(lgr_lock);
  230. cancel_delayed_work(&lgr->free_work);
  231. if (!lgr->is_smcd && !lgr->terminating)
  232. smc_llc_send_link_delete_all(lgr, true,
  233. SMC_LLC_DEL_PROG_INIT_TERM);
  234. if (lgr->is_smcd && !lgr->terminating)
  235. smc_ism_signal_shutdown(lgr);
  236. if (!lgr->is_smcd)
  237. smcr_lgr_link_deactivate_all(lgr);
  238. smc_lgr_free(lgr);
  239. }
  240. static void smc_lgr_terminate_work(struct work_struct *work)
  241. {
  242. struct smc_link_group *lgr = container_of(work, struct smc_link_group,
  243. terminate_work);
  244. __smc_lgr_terminate(lgr, true);
  245. }
  246. /* return next unique link id for the lgr */
  247. static u8 smcr_next_link_id(struct smc_link_group *lgr)
  248. {
  249. u8 link_id;
  250. int i;
  251. while (1) {
  252. again:
  253. link_id = ++lgr->next_link_id;
  254. if (!link_id) /* skip zero as link_id */
  255. link_id = ++lgr->next_link_id;
  256. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  257. if (smc_link_usable(&lgr->lnk[i]) &&
  258. lgr->lnk[i].link_id == link_id)
  259. goto again;
  260. }
  261. break;
  262. }
  263. return link_id;
  264. }
  265. int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
  266. u8 link_idx, struct smc_init_info *ini)
  267. {
  268. u8 rndvec[3];
  269. int rc;
  270. get_device(&ini->ib_dev->ibdev->dev);
  271. atomic_inc(&ini->ib_dev->lnk_cnt);
  272. lnk->link_id = smcr_next_link_id(lgr);
  273. lnk->lgr = lgr;
  274. lnk->link_idx = link_idx;
  275. lnk->smcibdev = ini->ib_dev;
  276. lnk->ibport = ini->ib_port;
  277. lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
  278. smc_llc_link_set_uid(lnk);
  279. INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
  280. if (!ini->ib_dev->initialized) {
  281. rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
  282. if (rc)
  283. goto out;
  284. }
  285. get_random_bytes(rndvec, sizeof(rndvec));
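/* assemble a random 24-bit initial packet sequence number from three random bytes */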
  286. lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
  287. (rndvec[2] << 16);
  288. rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
  289. ini->vlan_id, lnk->gid, &lnk->sgid_index);
  290. if (rc)
  291. goto out;
  292. rc = smc_llc_link_init(lnk);
  293. if (rc)
  294. goto out;
  295. rc = smc_wr_alloc_link_mem(lnk);
  296. if (rc)
  297. goto clear_llc_lnk;
  298. rc = smc_ib_create_protection_domain(lnk);
  299. if (rc)
  300. goto free_link_mem;
  301. rc = smc_ib_create_queue_pair(lnk);
  302. if (rc)
  303. goto dealloc_pd;
  304. rc = smc_wr_create_link(lnk);
  305. if (rc)
  306. goto destroy_qp;
  307. lnk->state = SMC_LNK_ACTIVATING;
  308. return 0;
  309. destroy_qp:
  310. smc_ib_destroy_queue_pair(lnk);
  311. dealloc_pd:
  312. smc_ib_dealloc_protection_domain(lnk);
  313. free_link_mem:
  314. smc_wr_free_link_mem(lnk);
  315. clear_llc_lnk:
  316. smc_llc_link_clear(lnk, false);
  317. out:
  318. put_device(&ini->ib_dev->ibdev->dev);
  319. memset(lnk, 0, sizeof(struct smc_link));
  320. lnk->state = SMC_LNK_UNUSED;
  321. if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
  322. wake_up(&ini->ib_dev->lnks_deleted);
  323. return rc;
  324. }
  325. /* create a new SMC link group */
  326. static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
  327. {
  328. struct smc_link_group *lgr;
  329. struct list_head *lgr_list;
  330. struct smc_link *lnk;
  331. spinlock_t *lgr_lock;
  332. u8 link_idx;
  333. int rc = 0;
  334. int i;
  335. if (ini->is_smcd && ini->vlan_id) {
  336. if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
  337. ini->vlan_id)) {
  338. rc = SMC_CLC_DECL_ISMVLANERR;
  339. goto out;
  340. }
  341. }
  342. lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
  343. if (!lgr) {
  344. rc = SMC_CLC_DECL_MEM;
  345. goto ism_put_vlan;
  346. }
  347. lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
  348. SMC_LGR_ID_SIZE, &lgr->id);
  349. if (!lgr->tx_wq) {
  350. rc = -ENOMEM;
  351. goto free_lgr;
  352. }
  353. lgr->is_smcd = ini->is_smcd;
  354. lgr->sync_err = 0;
  355. lgr->terminating = 0;
  356. lgr->freeing = 0;
  357. lgr->vlan_id = ini->vlan_id;
  358. mutex_init(&lgr->sndbufs_lock);
  359. mutex_init(&lgr->rmbs_lock);
  360. rwlock_init(&lgr->conns_lock);
  361. for (i = 0; i < SMC_RMBE_SIZES; i++) {
  362. INIT_LIST_HEAD(&lgr->sndbufs[i]);
  363. INIT_LIST_HEAD(&lgr->rmbs[i]);
  364. }
  365. lgr->next_link_id = 0;
  366. smc_lgr_list.num += SMC_LGR_NUM_INCR;
  367. memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
  368. INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
  369. INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
  370. lgr->conns_all = RB_ROOT;
  371. if (ini->is_smcd) {
  372. /* SMC-D specific settings */
  373. get_device(&ini->ism_dev[ini->ism_selected]->dev);
  374. lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
  375. lgr->smcd = ini->ism_dev[ini->ism_selected];
  376. lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
  377. lgr_lock = &lgr->smcd->lgr_lock;
  378. lgr->smc_version = ini->smcd_version;
  379. lgr->peer_shutdown = 0;
  380. atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
  381. } else {
  382. /* SMC-R specific settings */
  383. lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
  384. memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
  385. SMC_SYSTEMID_LEN);
  386. memcpy(lgr->pnet_id, ini->ib_dev->pnetid[ini->ib_port - 1],
  387. SMC_MAX_PNETID_LEN);
  388. smc_llc_lgr_init(lgr, smc);
  389. link_idx = SMC_SINGLE_LINK;
  390. lnk = &lgr->lnk[link_idx];
  391. rc = smcr_link_init(lgr, lnk, link_idx, ini);
  392. if (rc)
  393. goto free_wq;
  394. lgr_list = &smc_lgr_list.list;
  395. lgr_lock = &smc_lgr_list.lock;
  396. atomic_inc(&lgr_cnt);
  397. }
  398. smc->conn.lgr = lgr;
  399. spin_lock_bh(lgr_lock);
  400. list_add_tail(&lgr->list, lgr_list);
  401. spin_unlock_bh(lgr_lock);
  402. return 0;
  403. free_wq:
  404. destroy_workqueue(lgr->tx_wq);
  405. free_lgr:
  406. kfree(lgr);
  407. ism_put_vlan:
  408. if (ini->is_smcd && ini->vlan_id)
  409. smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
  410. out:
  411. if (rc < 0) {
  412. if (rc == -ENOMEM)
  413. rc = SMC_CLC_DECL_MEM;
  414. else
  415. rc = SMC_CLC_DECL_INTERR;
  416. }
  417. return rc;
  418. }
  419. static int smc_write_space(struct smc_connection *conn)
  420. {
  421. int buffer_len = conn->peer_rmbe_size;
  422. union smc_host_cursor prod;
  423. union smc_host_cursor cons;
  424. int space;
  425. smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
  426. smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
  427. /* determine rx_buf space */
  428. space = buffer_len - smc_curs_diff(buffer_len, &cons, &prod);
  429. return space;
  430. }
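/* Worked example (illustrative): with a 64 KiB peer RMB, prod at byte 40960
 * and cons at byte 8192 (no wrap), smc_curs_diff() reports 32768 bytes still
 * in flight, so smc_write_space() returns 65536 - 32768 = 32768 writable bytes.
 */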
  431. static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
  432. struct smc_wr_buf *wr_buf)
  433. {
  434. struct smc_connection *conn = &smc->conn;
  435. union smc_host_cursor cons, fin;
  436. int rc = 0;
  437. int diff;
  438. smc_curs_copy(&conn->tx_curs_sent, &conn->tx_curs_fin, conn);
  439. smc_curs_copy(&fin, &conn->local_tx_ctrl_fin, conn);
  440. /* set prod cursor to old state, enforce tx_rdma_writes() */
  441. smc_curs_copy(&conn->local_tx_ctrl.prod, &fin, conn);
  442. smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);
  443. if (smc_curs_comp(conn->peer_rmbe_size, &cons, &fin) < 0) {
  444. /* cons cursor advanced more than fin, and prod was set
  445. * fin above, so now prod is smaller than cons. Fix that.
  446. */
  447. diff = smc_curs_diff(conn->peer_rmbe_size, &fin, &cons);
  448. smc_curs_add(conn->sndbuf_desc->len,
  449. &conn->tx_curs_sent, diff);
  450. smc_curs_add(conn->sndbuf_desc->len,
  451. &conn->tx_curs_fin, diff);
  452. smp_mb__before_atomic();
  453. atomic_add(diff, &conn->sndbuf_space);
  454. smp_mb__after_atomic();
  455. smc_curs_add(conn->peer_rmbe_size,
  456. &conn->local_tx_ctrl.prod, diff);
  457. smc_curs_add(conn->peer_rmbe_size,
  458. &conn->local_tx_ctrl_fin, diff);
  459. }
  460. /* recalculate, value is used by tx_rdma_writes() */
  461. atomic_set(&smc->conn.peer_rmbe_space, smc_write_space(conn));
  462. if (smc->sk.sk_state != SMC_INIT &&
  463. smc->sk.sk_state != SMC_CLOSED) {
  464. rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
  465. if (!rc) {
  466. queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
  467. smc->sk.sk_data_ready(&smc->sk);
  468. }
  469. } else {
  470. smc_wr_tx_put_slot(conn->lnk,
  471. (struct smc_wr_tx_pend_priv *)pend);
  472. }
  473. return rc;
  474. }
  475. struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
  476. struct smc_link *from_lnk, bool is_dev_err)
  477. {
  478. struct smc_link *to_lnk = NULL;
  479. struct smc_cdc_tx_pend *pend;
  480. struct smc_connection *conn;
  481. struct smc_wr_buf *wr_buf;
  482. struct smc_sock *smc;
  483. struct rb_node *node;
  484. int i, rc = 0;
  485. /* link is inactive, wake up tx waiters */
  486. smc_wr_wakeup_tx_wait(from_lnk);
  487. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  488. if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
  489. continue;
  490. if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
  491. from_lnk->ibport == lgr->lnk[i].ibport) {
  492. continue;
  493. }
  494. to_lnk = &lgr->lnk[i];
  495. break;
  496. }
  497. if (!to_lnk || !smc_wr_tx_link_hold(to_lnk)) {
  498. smc_lgr_terminate_sched(lgr);
  499. return NULL;
  500. }
  501. again:
  502. read_lock_bh(&lgr->conns_lock);
  503. for (node = rb_first(&lgr->conns_all); node; node = rb_next(node)) {
  504. conn = rb_entry(node, struct smc_connection, alert_node);
  505. if (conn->lnk != from_lnk)
  506. continue;
  507. smc = container_of(conn, struct smc_sock, conn);
  508. /* conn->lnk not yet set in SMC_INIT state */
  509. if (smc->sk.sk_state == SMC_INIT)
  510. continue;
  511. if (smc->sk.sk_state == SMC_CLOSED ||
  512. smc->sk.sk_state == SMC_PEERCLOSEWAIT1 ||
  513. smc->sk.sk_state == SMC_PEERCLOSEWAIT2 ||
  514. smc->sk.sk_state == SMC_APPFINCLOSEWAIT ||
  515. smc->sk.sk_state == SMC_APPCLOSEWAIT1 ||
  516. smc->sk.sk_state == SMC_APPCLOSEWAIT2 ||
  517. smc->sk.sk_state == SMC_PEERFINCLOSEWAIT ||
  518. smc->sk.sk_state == SMC_PEERABORTWAIT ||
  519. smc->sk.sk_state == SMC_PROCESSABORT) {
  520. spin_lock_bh(&conn->send_lock);
  521. conn->lnk = to_lnk;
  522. spin_unlock_bh(&conn->send_lock);
  523. continue;
  524. }
  525. sock_hold(&smc->sk);
  526. read_unlock_bh(&lgr->conns_lock);
  527. /* pre-fetch buffer outside of send_lock, might sleep */
  528. rc = smc_cdc_get_free_slot(conn, to_lnk, &wr_buf, NULL, &pend);
  529. if (rc)
  530. goto err_out;
  531. /* avoid race with smcr_tx_sndbuf_nonempty() */
  532. spin_lock_bh(&conn->send_lock);
  533. conn->lnk = to_lnk;
  534. rc = smc_switch_cursor(smc, pend, wr_buf);
  535. spin_unlock_bh(&conn->send_lock);
  536. sock_put(&smc->sk);
  537. if (rc)
  538. goto err_out;
  539. goto again;
  540. }
  541. read_unlock_bh(&lgr->conns_lock);
  542. smc_wr_tx_link_put(to_lnk);
  543. return to_lnk;
  544. err_out:
  545. smcr_link_down_cond_sched(to_lnk);
  546. smc_wr_tx_link_put(to_lnk);
  547. return NULL;
  548. }
  549. static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
  550. struct smc_link_group *lgr)
  551. {
  552. int rc;
  553. if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
  554. /* unregister rmb with peer */
  555. rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
  556. if (!rc) {
  557. /* protect against smc_llc_cli_rkey_exchange() */
  558. mutex_lock(&lgr->llc_conf_mutex);
  559. smc_llc_do_delete_rkey(lgr, rmb_desc);
  560. rmb_desc->is_conf_rkey = false;
  561. mutex_unlock(&lgr->llc_conf_mutex);
  562. smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
  563. }
  564. }
  565. if (rmb_desc->is_reg_err) {
  566. /* buf registration failed, reuse not possible */
  567. mutex_lock(&lgr->rmbs_lock);
  568. list_del(&rmb_desc->list);
  569. mutex_unlock(&lgr->rmbs_lock);
  570. smc_buf_free(lgr, true, rmb_desc);
  571. } else {
  572. rmb_desc->used = 0;
  573. }
  574. }
  575. static void smc_buf_unuse(struct smc_connection *conn,
  576. struct smc_link_group *lgr)
  577. {
  578. if (conn->sndbuf_desc)
  579. conn->sndbuf_desc->used = 0;
  580. if (conn->rmb_desc && lgr->is_smcd)
  581. conn->rmb_desc->used = 0;
  582. else if (conn->rmb_desc)
  583. smcr_buf_unuse(conn->rmb_desc, lgr);
  584. }
  585. /* remove a finished connection from its link group */
  586. void smc_conn_free(struct smc_connection *conn)
  587. {
  588. struct smc_link_group *lgr = conn->lgr;
  589. if (!lgr)
  590. return;
  591. if (lgr->is_smcd) {
  592. if (!list_empty(&lgr->list))
  593. smc_ism_unset_conn(conn);
  594. tasklet_kill(&conn->rx_tsklet);
  595. } else {
  596. smc_cdc_wait_pend_tx_wr(conn);
  597. if (current_work() != &conn->abort_work)
  598. cancel_work_sync(&conn->abort_work);
  599. }
  600. if (!list_empty(&lgr->list)) {
  601. smc_buf_unuse(conn, lgr); /* allow buffer reuse */
  602. smc_lgr_unregister_conn(conn);
  603. }
  604. if (!lgr->conns_num)
  605. smc_lgr_schedule_free_work(lgr);
  606. }
  607. /* unregister a link from a buf_desc */
  608. static void smcr_buf_unmap_link(struct smc_buf_desc *buf_desc, bool is_rmb,
  609. struct smc_link *lnk)
  610. {
  611. if (is_rmb)
  612. buf_desc->is_reg_mr[lnk->link_idx] = false;
  613. if (!buf_desc->is_map_ib[lnk->link_idx])
  614. return;
  615. if (is_rmb) {
  616. if (buf_desc->mr_rx[lnk->link_idx]) {
  617. smc_ib_put_memory_region(
  618. buf_desc->mr_rx[lnk->link_idx]);
  619. buf_desc->mr_rx[lnk->link_idx] = NULL;
  620. }
  621. smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
  622. } else {
  623. smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
  624. }
  625. sg_free_table(&buf_desc->sgt[lnk->link_idx]);
  626. buf_desc->is_map_ib[lnk->link_idx] = false;
  627. }
  628. /* unmap all buffers of lgr for a deleted link */
  629. static void smcr_buf_unmap_lgr(struct smc_link *lnk)
  630. {
  631. struct smc_link_group *lgr = lnk->lgr;
  632. struct smc_buf_desc *buf_desc, *bf;
  633. int i;
  634. for (i = 0; i < SMC_RMBE_SIZES; i++) {
  635. mutex_lock(&lgr->rmbs_lock);
  636. list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
  637. smcr_buf_unmap_link(buf_desc, true, lnk);
  638. mutex_unlock(&lgr->rmbs_lock);
  639. mutex_lock(&lgr->sndbufs_lock);
  640. list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
  641. list)
  642. smcr_buf_unmap_link(buf_desc, false, lnk);
  643. mutex_unlock(&lgr->sndbufs_lock);
  644. }
  645. }
  646. static void smcr_rtoken_clear_link(struct smc_link *lnk)
  647. {
  648. struct smc_link_group *lgr = lnk->lgr;
  649. int i;
  650. for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
  651. lgr->rtokens[i][lnk->link_idx].rkey = 0;
  652. lgr->rtokens[i][lnk->link_idx].dma_addr = 0;
  653. }
  654. }
  655. /* must be called under lgr->llc_conf_mutex lock */
  656. void smcr_link_clear(struct smc_link *lnk, bool log)
  657. {
  658. struct smc_ib_device *smcibdev;
  659. if (!lnk->lgr || lnk->state == SMC_LNK_UNUSED)
  660. return;
  661. lnk->peer_qpn = 0;
  662. smc_llc_link_clear(lnk, log);
  663. smcr_buf_unmap_lgr(lnk);
  664. smcr_rtoken_clear_link(lnk);
  665. smc_ib_modify_qp_error(lnk);
  666. smc_wr_free_link(lnk);
  667. smc_ib_destroy_queue_pair(lnk);
  668. smc_ib_dealloc_protection_domain(lnk);
  669. smc_wr_free_link_mem(lnk);
  670. put_device(&lnk->smcibdev->ibdev->dev);
  671. smcibdev = lnk->smcibdev;
  672. memset(lnk, 0, sizeof(struct smc_link));
  673. lnk->state = SMC_LNK_UNUSED;
  674. if (!atomic_dec_return(&smcibdev->lnk_cnt))
  675. wake_up(&smcibdev->lnks_deleted);
  676. }
  677. static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
  678. struct smc_buf_desc *buf_desc)
  679. {
  680. int i;
  681. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
  682. smcr_buf_unmap_link(buf_desc, is_rmb, &lgr->lnk[i]);
  683. if (buf_desc->pages)
  684. __free_pages(buf_desc->pages, buf_desc->order);
  685. kfree(buf_desc);
  686. }
  687. static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
  688. struct smc_buf_desc *buf_desc)
  689. {
  690. if (is_dmb) {
  691. /* restore original buf len */
  692. buf_desc->len += sizeof(struct smcd_cdc_msg);
  693. smc_ism_unregister_dmb(lgr->smcd, buf_desc);
  694. } else {
  695. kfree(buf_desc->cpu_addr);
  696. }
  697. kfree(buf_desc);
  698. }
  699. static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
  700. struct smc_buf_desc *buf_desc)
  701. {
  702. if (lgr->is_smcd)
  703. smcd_buf_free(lgr, is_rmb, buf_desc);
  704. else
  705. smcr_buf_free(lgr, is_rmb, buf_desc);
  706. }
  707. static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
  708. {
  709. struct smc_buf_desc *buf_desc, *bf_desc;
  710. struct list_head *buf_list;
  711. int i;
  712. for (i = 0; i < SMC_RMBE_SIZES; i++) {
  713. if (is_rmb)
  714. buf_list = &lgr->rmbs[i];
  715. else
  716. buf_list = &lgr->sndbufs[i];
  717. list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
  718. list) {
  719. list_del(&buf_desc->list);
  720. smc_buf_free(lgr, is_rmb, buf_desc);
  721. }
  722. }
  723. }
  724. static void smc_lgr_free_bufs(struct smc_link_group *lgr)
  725. {
  726. /* free send buffers */
  727. __smc_lgr_free_bufs(lgr, false);
  728. /* free rmbs */
  729. __smc_lgr_free_bufs(lgr, true);
  730. }
  731. /* remove a link group */
  732. static void smc_lgr_free(struct smc_link_group *lgr)
  733. {
  734. int i;
  735. if (!lgr->is_smcd) {
  736. mutex_lock(&lgr->llc_conf_mutex);
  737. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  738. if (lgr->lnk[i].state != SMC_LNK_UNUSED)
  739. smcr_link_clear(&lgr->lnk[i], false);
  740. }
  741. mutex_unlock(&lgr->llc_conf_mutex);
  742. smc_llc_lgr_clear(lgr);
  743. }
  744. smc_lgr_free_bufs(lgr);
  745. destroy_workqueue(lgr->tx_wq);
  746. if (lgr->is_smcd) {
  747. smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
  748. put_device(&lgr->smcd->dev);
  749. if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
  750. wake_up(&lgr->smcd->lgrs_deleted);
  751. } else {
  752. if (!atomic_dec_return(&lgr_cnt))
  753. wake_up(&lgrs_deleted);
  754. }
  755. kfree(lgr);
  756. }
  757. static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
  758. {
  759. int i;
  760. for (i = 0; i < SMC_RMBE_SIZES; i++) {
  761. struct smc_buf_desc *buf_desc;
  762. list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
  763. buf_desc->len += sizeof(struct smcd_cdc_msg);
  764. smc_ism_unregister_dmb(lgr->smcd, buf_desc);
  765. }
  766. }
  767. }
  768. static void smc_sk_wake_ups(struct smc_sock *smc)
  769. {
  770. smc->sk.sk_write_space(&smc->sk);
  771. smc->sk.sk_data_ready(&smc->sk);
  772. smc->sk.sk_state_change(&smc->sk);
  773. }
  774. /* kill a connection */
  775. static void smc_conn_kill(struct smc_connection *conn, bool soft)
  776. {
  777. struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
  778. if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
  779. conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
  780. else
  781. smc_close_abort(conn);
  782. conn->killed = 1;
  783. smc->sk.sk_err = ECONNABORTED;
  784. smc_sk_wake_ups(smc);
  785. if (conn->lgr->is_smcd) {
  786. smc_ism_unset_conn(conn);
  787. if (soft)
  788. tasklet_kill(&conn->rx_tsklet);
  789. else
  790. tasklet_unlock_wait(&conn->rx_tsklet);
  791. } else {
  792. smc_cdc_wait_pend_tx_wr(conn);
  793. }
  794. smc_lgr_unregister_conn(conn);
  795. smc_close_active_abort(smc);
  796. }
  797. static void smc_lgr_cleanup(struct smc_link_group *lgr)
  798. {
  799. if (lgr->is_smcd) {
  800. smc_ism_signal_shutdown(lgr);
  801. smcd_unregister_all_dmbs(lgr);
  802. } else {
  803. u32 rsn = lgr->llc_termination_rsn;
  804. if (!rsn)
  805. rsn = SMC_LLC_DEL_PROG_INIT_TERM;
  806. smc_llc_send_link_delete_all(lgr, false, rsn);
  807. smcr_lgr_link_deactivate_all(lgr);
  808. }
  809. }
  810. /* terminate link group
  811. * @soft: true if link group shutdown can take its time
  812. * false if immediate link group shutdown is required
  813. */
  814. static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
  815. {
  816. struct smc_connection *conn;
  817. struct smc_sock *smc;
  818. struct rb_node *node;
  819. if (lgr->terminating)
  820. return; /* lgr already terminating */
  821. /* cancel free_work sync, will terminate when lgr->freeing is set */
  822. cancel_delayed_work_sync(&lgr->free_work);
  823. lgr->terminating = 1;
  824. /* kill remaining link group connections */
  825. read_lock_bh(&lgr->conns_lock);
  826. node = rb_first(&lgr->conns_all);
  827. while (node) {
  828. read_unlock_bh(&lgr->conns_lock);
  829. conn = rb_entry(node, struct smc_connection, alert_node);
  830. smc = container_of(conn, struct smc_sock, conn);
  831. sock_hold(&smc->sk); /* sock_put below */
  832. lock_sock(&smc->sk);
  833. smc_conn_kill(conn, soft);
  834. release_sock(&smc->sk);
  835. sock_put(&smc->sk); /* sock_hold above */
  836. read_lock_bh(&lgr->conns_lock);
  837. node = rb_first(&lgr->conns_all);
  838. }
  839. read_unlock_bh(&lgr->conns_lock);
  840. smc_lgr_cleanup(lgr);
  841. smc_lgr_free(lgr);
  842. }
  843. /* unlink link group and schedule termination */
  844. void smc_lgr_terminate_sched(struct smc_link_group *lgr)
  845. {
  846. spinlock_t *lgr_lock;
  847. smc_lgr_list_head(lgr, &lgr_lock);
  848. spin_lock_bh(lgr_lock);
  849. if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
  850. spin_unlock_bh(lgr_lock);
  851. return; /* lgr already terminating */
  852. }
  853. list_del_init(&lgr->list);
  854. lgr->freeing = 1;
  855. spin_unlock_bh(lgr_lock);
  856. schedule_work(&lgr->terminate_work);
  857. }
  858. /* Called when peer lgr shutdown (regularly or abnormally) is received */
  859. void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
  860. {
  861. struct smc_link_group *lgr, *l;
  862. LIST_HEAD(lgr_free_list);
  863. /* run common cleanup function and build free list */
  864. spin_lock_bh(&dev->lgr_lock);
  865. list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
  866. if ((!peer_gid || lgr->peer_gid == peer_gid) &&
  867. (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
  868. if (peer_gid) /* peer triggered termination */
  869. lgr->peer_shutdown = 1;
  870. list_move(&lgr->list, &lgr_free_list);
  871. lgr->freeing = 1;
  872. }
  873. }
  874. spin_unlock_bh(&dev->lgr_lock);
  875. /* cancel the regular free workers and actually free lgrs */
  876. list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
  877. list_del_init(&lgr->list);
  878. schedule_work(&lgr->terminate_work);
  879. }
  880. }
  881. /* Called when an SMCD device is removed or the smc module is unloaded */
  882. void smc_smcd_terminate_all(struct smcd_dev *smcd)
  883. {
  884. struct smc_link_group *lgr, *lg;
  885. LIST_HEAD(lgr_free_list);
  886. spin_lock_bh(&smcd->lgr_lock);
  887. list_splice_init(&smcd->lgr_list, &lgr_free_list);
  888. list_for_each_entry(lgr, &lgr_free_list, list)
  889. lgr->freeing = 1;
  890. spin_unlock_bh(&smcd->lgr_lock);
  891. list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
  892. list_del_init(&lgr->list);
  893. __smc_lgr_terminate(lgr, false);
  894. }
  895. if (atomic_read(&smcd->lgr_cnt))
  896. wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
  897. }
  898. /* Called when an SMCR device is removed or the smc module is unloaded.
  899. * If smcibdev is given, all SMCR link groups using this device are terminated.
  900. * If smcibdev is NULL, all SMCR link groups are terminated.
  901. */
  902. void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
  903. {
  904. struct smc_link_group *lgr, *lg;
  905. LIST_HEAD(lgr_free_list);
  906. int i;
  907. spin_lock_bh(&smc_lgr_list.lock);
  908. if (!smcibdev) {
  909. list_splice_init(&smc_lgr_list.list, &lgr_free_list);
  910. list_for_each_entry(lgr, &lgr_free_list, list)
  911. lgr->freeing = 1;
  912. } else {
  913. list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
  914. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  915. if (lgr->lnk[i].smcibdev == smcibdev)
  916. smcr_link_down_cond_sched(&lgr->lnk[i]);
  917. }
  918. }
  919. }
  920. spin_unlock_bh(&smc_lgr_list.lock);
  921. list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
  922. list_del_init(&lgr->list);
  923. smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_OP_INIT_TERM);
  924. __smc_lgr_terminate(lgr, false);
  925. }
  926. if (smcibdev) {
  927. if (atomic_read(&smcibdev->lnk_cnt))
  928. wait_event(smcibdev->lnks_deleted,
  929. !atomic_read(&smcibdev->lnk_cnt));
  930. } else {
  931. if (atomic_read(&lgr_cnt))
  932. wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
  933. }
  934. }
  935. /* set new lgr type and clear all asymmetric link tagging */
  936. void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
  937. {
  938. char *lgr_type = "";
  939. int i;
  940. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
  941. if (smc_link_usable(&lgr->lnk[i]))
  942. lgr->lnk[i].link_is_asym = false;
  943. if (lgr->type == new_type)
  944. return;
  945. lgr->type = new_type;
  946. switch (lgr->type) {
  947. case SMC_LGR_NONE:
  948. lgr_type = "NONE";
  949. break;
  950. case SMC_LGR_SINGLE:
  951. lgr_type = "SINGLE";
  952. break;
  953. case SMC_LGR_SYMMETRIC:
  954. lgr_type = "SYMMETRIC";
  955. break;
  956. case SMC_LGR_ASYMMETRIC_PEER:
  957. lgr_type = "ASYMMETRIC_PEER";
  958. break;
  959. case SMC_LGR_ASYMMETRIC_LOCAL:
  960. lgr_type = "ASYMMETRIC_LOCAL";
  961. break;
  962. }
  963. pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
  964. "%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
  965. lgr_type, lgr->pnet_id);
  966. }
  967. /* set new lgr type and tag a link as asymmetric */
  968. void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
  969. enum smc_lgr_type new_type, int asym_lnk_idx)
  970. {
  971. smcr_lgr_set_type(lgr, new_type);
  972. lgr->lnk[asym_lnk_idx].link_is_asym = true;
  973. }
  974. /* abort connection, abort_work scheduled from tasklet context */
  975. static void smc_conn_abort_work(struct work_struct *work)
  976. {
  977. struct smc_connection *conn = container_of(work,
  978. struct smc_connection,
  979. abort_work);
  980. struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
  981. lock_sock(&smc->sk);
  982. smc_conn_kill(conn, true);
  983. release_sock(&smc->sk);
  984. sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
  985. }
  986. void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
  987. {
  988. struct smc_link_group *lgr, *n;
  989. list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
  990. struct smc_link *link;
  991. if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
  992. SMC_MAX_PNETID_LEN) ||
  993. lgr->type == SMC_LGR_SYMMETRIC ||
  994. lgr->type == SMC_LGR_ASYMMETRIC_PEER)
  995. continue;
  996. /* trigger local add link processing */
  997. link = smc_llc_usable_link(lgr);
  998. if (link)
  999. smc_llc_add_link_local(link);
  1000. }
  1001. }
  1002. /* link is down - switch connections to alternate link,
  1003. * must be called under lgr->llc_conf_mutex lock
  1004. */
  1005. static void smcr_link_down(struct smc_link *lnk)
  1006. {
  1007. struct smc_link_group *lgr = lnk->lgr;
  1008. struct smc_link *to_lnk;
  1009. int del_link_id;
  1010. if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
  1011. return;
  1012. to_lnk = smc_switch_conns(lgr, lnk, true);
  1013. if (!to_lnk) { /* no backup link available */
  1014. smcr_link_clear(lnk, true);
  1015. return;
  1016. }
  1017. smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
  1018. del_link_id = lnk->link_id;
  1019. if (lgr->role == SMC_SERV) {
  1020. /* trigger local delete link processing */
  1021. smc_llc_srv_delete_link_local(to_lnk, del_link_id);
  1022. } else {
  1023. if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
  1024. /* another llc task is ongoing */
  1025. mutex_unlock(&lgr->llc_conf_mutex);
  1026. wait_event_timeout(lgr->llc_flow_waiter,
  1027. (list_empty(&lgr->list) ||
  1028. lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
  1029. SMC_LLC_WAIT_TIME);
  1030. mutex_lock(&lgr->llc_conf_mutex);
  1031. }
  1032. if (!list_empty(&lgr->list)) {
  1033. smc_llc_send_delete_link(to_lnk, del_link_id,
  1034. SMC_LLC_REQ, true,
  1035. SMC_LLC_DEL_LOST_PATH);
  1036. smcr_link_clear(lnk, true);
  1037. }
  1038. wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
  1039. }
  1040. }
  1041. /* must be called under lgr->llc_conf_mutex lock */
  1042. void smcr_link_down_cond(struct smc_link *lnk)
  1043. {
  1044. if (smc_link_downing(&lnk->state))
  1045. smcr_link_down(lnk);
  1046. }
  1047. /* will get the lgr->llc_conf_mutex lock */
  1048. void smcr_link_down_cond_sched(struct smc_link *lnk)
  1049. {
  1050. if (smc_link_downing(&lnk->state))
  1051. schedule_work(&lnk->link_down_wrk);
  1052. }
  1053. void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
  1054. {
  1055. struct smc_link_group *lgr, *n;
  1056. int i;
  1057. list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
  1058. if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
  1059. SMC_MAX_PNETID_LEN))
  1060. continue; /* lgr is not affected */
  1061. if (list_empty(&lgr->list))
  1062. continue;
  1063. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  1064. struct smc_link *lnk = &lgr->lnk[i];
  1065. if (smc_link_usable(lnk) &&
  1066. lnk->smcibdev == smcibdev && lnk->ibport == ibport)
  1067. smcr_link_down_cond_sched(lnk);
  1068. }
  1069. }
  1070. }
  1071. static void smc_link_down_work(struct work_struct *work)
  1072. {
  1073. struct smc_link *link = container_of(work, struct smc_link,
  1074. link_down_wrk);
  1075. struct smc_link_group *lgr = link->lgr;
  1076. if (list_empty(&lgr->list))
  1077. return;
  1078. wake_up_all(&lgr->llc_msg_waiter);
  1079. mutex_lock(&lgr->llc_conf_mutex);
  1080. smcr_link_down(link);
  1081. mutex_unlock(&lgr->llc_conf_mutex);
  1082. }
  1083. static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
  1084. struct netdev_nested_priv *priv)
  1085. {
  1086. unsigned short *vlan_id = (unsigned short *)priv->data;
  1087. if (is_vlan_dev(lower_dev)) {
  1088. *vlan_id = vlan_dev_vlan_id(lower_dev);
  1089. return 1;
  1090. }
  1091. return 0;
  1092. }
  1093. /* Determine vlan of internal TCP socket. */
  1094. int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
  1095. {
  1096. struct dst_entry *dst = sk_dst_get(clcsock->sk);
  1097. struct netdev_nested_priv priv;
  1098. struct net_device *ndev;
  1099. int rc = 0;
  1100. ini->vlan_id = 0;
  1101. if (!dst) {
  1102. rc = -ENOTCONN;
  1103. goto out;
  1104. }
  1105. if (!dst->dev) {
  1106. rc = -ENODEV;
  1107. goto out_rel;
  1108. }
  1109. ndev = dst->dev;
  1110. if (is_vlan_dev(ndev)) {
  1111. ini->vlan_id = vlan_dev_vlan_id(ndev);
  1112. goto out_rel;
  1113. }
  1114. priv.data = (void *)&ini->vlan_id;
  1115. rtnl_lock();
  1116. netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
  1117. rtnl_unlock();
  1118. out_rel:
  1119. dst_release(dst);
  1120. out:
  1121. return rc;
  1122. }
  1123. static bool smcr_lgr_match(struct smc_link_group *lgr,
  1124. struct smc_clc_msg_local *lcl,
  1125. enum smc_lgr_role role, u32 clcqpn)
  1126. {
  1127. int i;
  1128. if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
  1129. lgr->role != role)
  1130. return false;
  1131. for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
  1132. if (!smc_link_active(&lgr->lnk[i]))
  1133. continue;
  1134. if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
  1135. !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
  1136. !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
  1137. return true;
  1138. }
  1139. return false;
  1140. }
  1141. static bool smcd_lgr_match(struct smc_link_group *lgr,
  1142. struct smcd_dev *smcismdev, u64 peer_gid)
  1143. {
  1144. return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
  1145. }
  1146. /* create a new SMC connection (and a new link group if necessary) */
  1147. int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
  1148. {
  1149. struct smc_connection *conn = &smc->conn;
  1150. struct list_head *lgr_list;
  1151. struct smc_link_group *lgr;
  1152. enum smc_lgr_role role;
  1153. spinlock_t *lgr_lock;
  1154. int rc = 0;
  1155. lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
  1156. &smc_lgr_list.list;
  1157. lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
  1158. &smc_lgr_list.lock;
  1159. ini->first_contact_local = 1;
  1160. role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
  1161. if (role == SMC_CLNT && ini->first_contact_peer)
  1162. /* create new link group as well */
  1163. goto create;
  1164. /* determine if an existing link group can be reused */
  1165. spin_lock_bh(lgr_lock);
  1166. list_for_each_entry(lgr, lgr_list, list) {
  1167. write_lock_bh(&lgr->conns_lock);
  1168. if ((ini->is_smcd ?
  1169. smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
  1170. ini->ism_peer_gid[ini->ism_selected]) :
  1171. smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
  1172. !lgr->sync_err &&
  1173. (ini->smcd_version == SMC_V2 ||
  1174. lgr->vlan_id == ini->vlan_id) &&
  1175. (role == SMC_CLNT || ini->is_smcd ||
  1176. (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
  1177. !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
  1178. /* link group found */
  1179. ini->first_contact_local = 0;
  1180. conn->lgr = lgr;
  1181. rc = smc_lgr_register_conn(conn, false);
  1182. write_unlock_bh(&lgr->conns_lock);
  1183. if (!rc && delayed_work_pending(&lgr->free_work))
  1184. cancel_delayed_work(&lgr->free_work);
  1185. break;
  1186. }
  1187. write_unlock_bh(&lgr->conns_lock);
  1188. }
  1189. spin_unlock_bh(lgr_lock);
  1190. if (rc)
  1191. return rc;
  1192. if (role == SMC_CLNT && !ini->first_contact_peer &&
  1193. ini->first_contact_local) {
  1194. /* Server reuses a link group, but Client wants to start
  1195. * a new one
  1196. * send out_of_sync decline, reason synchr. error
  1197. */
  1198. return SMC_CLC_DECL_SYNCERR;
  1199. }
  1200. create:
  1201. if (ini->first_contact_local) {
  1202. rc = smc_lgr_create(smc, ini);
  1203. if (rc)
  1204. goto out;
  1205. lgr = conn->lgr;
  1206. write_lock_bh(&lgr->conns_lock);
  1207. rc = smc_lgr_register_conn(conn, true);
  1208. write_unlock_bh(&lgr->conns_lock);
  1209. if (rc)
  1210. goto out;
  1211. }
  1212. conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
  1213. conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
  1214. conn->urg_state = SMC_URG_READ;
  1215. init_waitqueue_head(&conn->cdc_pend_tx_wq);
  1216. INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
  1217. if (ini->is_smcd) {
  1218. conn->rx_off = sizeof(struct smcd_cdc_msg);
  1219. smcd_cdc_rx_init(conn); /* init tasklet for this conn */
  1220. } else {
  1221. conn->rx_off = 0;
  1222. }
  1223. #ifndef KERNEL_HAS_ATOMIC64
  1224. spin_lock_init(&conn->acurs_lock);
  1225. #endif
  1226. out:
  1227. return rc;
  1228. }
  1229. /* convert the RMB size into the compressed notation - minimum 16K.
  1230. * In contrast to plain ilog2, this rounds towards the next power of 2,
  1231. * so the socket application gets at least its desired sndbuf / rcvbuf size.
  1232. */
  1233. static u8 smc_compress_bufsize(int size)
  1234. {
  1235. u8 compressed;
  1236. if (size <= SMC_BUF_MIN_SIZE)
  1237. return 0;
  1238. size = (size - 1) >> 14;
  1239. compressed = ilog2(size) + 1;
  1240. if (compressed >= SMC_RMBE_SIZES)
  1241. compressed = SMC_RMBE_SIZES - 1;
  1242. return compressed;
  1243. }
  1244. /* convert the RMB size from compressed notation into integer */
  1245. int smc_uncompress_bufsize(u8 compressed)
  1246. {
  1247. u32 size;
  1248. size = 0x00000001 << (((int)compressed) + 14);
  1249. return (int)size;
  1250. }
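/* Worked example (illustrative): a requested size of 20000 bytes compresses
 * to (20000 - 1) >> 14 = 1, ilog2(1) + 1 = 1, and uncompresses back to
 * 1 << (1 + 14) = 32768 - the next power of two >= the request, with
 * 16 KiB (compressed value 0) as the minimum.
 */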
  1251. /* try to reuse a sndbuf or rmb description slot for a certain
  1252. * buffer size; if not available, return NULL
  1253. */
  1254. static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
  1255. struct mutex *lock,
  1256. struct list_head *buf_list)
  1257. {
  1258. struct smc_buf_desc *buf_slot;
  1259. mutex_lock(lock);
  1260. list_for_each_entry(buf_slot, buf_list, list) {
  1261. if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
  1262. mutex_unlock(lock);
  1263. return buf_slot;
  1264. }
  1265. }
  1266. mutex_unlock(lock);
  1267. return NULL;
  1268. }
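/* Note that cmpxchg(&buf_slot->used, 0, 1) claims a free slot atomically,
 * so two connections racing for the same cached buffer cannot both reuse it.
 */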
  1269. /* one of the conditions for announcing a receiver's current window size is
  1270. * that it "results in a minimum increase in the window size of 10% of the
  1271. * receive buffer space" [RFC7609]
  1272. */
  1273. static inline int smc_rmb_wnd_update_limit(int rmbe_size)
  1274. {
  1275. return max_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
  1276. }
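/* Worked example (illustrative): for a 64 KiB RMB this yields
 * max(65536 / 10, SOCK_MIN_SNDBUF / 2) = 6553 bytes, assuming
 * SOCK_MIN_SNDBUF / 2 is the smaller of the two on this configuration.
 */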
  1277. /* map an rmb buf to a link */
  1278. static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
  1279. struct smc_link *lnk)
  1280. {
  1281. int rc;
  1282. if (buf_desc->is_map_ib[lnk->link_idx])
  1283. return 0;
  1284. rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
  1285. if (rc)
  1286. return rc;
  1287. sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
  1288. buf_desc->cpu_addr, buf_desc->len);
  1289. /* map sg table to DMA address */
  1290. rc = smc_ib_buf_map_sg(lnk, buf_desc,
  1291. is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
  1292. /* SMC protocol depends on mapping to one DMA address only */
  1293. if (rc != 1) {
  1294. rc = -EAGAIN;
  1295. goto free_table;
  1296. }
  1297. /* create a new memory region for the RMB */
  1298. if (is_rmb) {
  1299. rc = smc_ib_get_memory_region(lnk->roce_pd,
  1300. IB_ACCESS_REMOTE_WRITE |
  1301. IB_ACCESS_LOCAL_WRITE,
  1302. buf_desc, lnk->link_idx);
  1303. if (rc)
  1304. goto buf_unmap;
  1305. smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
  1306. }
  1307. buf_desc->is_map_ib[lnk->link_idx] = true;
  1308. return 0;
  1309. buf_unmap:
  1310. smc_ib_buf_unmap_sg(lnk, buf_desc,
  1311. is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
  1312. free_table:
  1313. sg_free_table(&buf_desc->sgt[lnk->link_idx]);
  1314. return rc;
  1315. }
  1316. /* register a new rmb on IB device,
  1317. * must be called under lgr->llc_conf_mutex lock
  1318. */
  1319. int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
  1320. {
  1321. if (list_empty(&link->lgr->list))
  1322. return -ENOLINK;
  1323. if (!rmb_desc->is_reg_mr[link->link_idx]) {
  1324. /* register memory region for new rmb */
  1325. if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
  1326. rmb_desc->is_reg_err = true;
  1327. return -EFAULT;
  1328. }
  1329. rmb_desc->is_reg_mr[link->link_idx] = true;
  1330. }
  1331. return 0;
  1332. }
  1333. static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
  1334. struct list_head *lst, bool is_rmb)
  1335. {
  1336. struct smc_buf_desc *buf_desc, *bf;
  1337. int rc = 0;
  1338. mutex_lock(lock);
  1339. list_for_each_entry_safe(buf_desc, bf, lst, list) {
  1340. if (!buf_desc->used)
  1341. continue;
  1342. rc = smcr_buf_map_link(buf_desc, is_rmb, lnk);
  1343. if (rc)
  1344. goto out;
  1345. }
  1346. out:
  1347. mutex_unlock(lock);
  1348. return rc;
  1349. }
  1350. /* map all used buffers of lgr for a new link */
  1351. int smcr_buf_map_lgr(struct smc_link *lnk)
  1352. {
  1353. struct smc_link_group *lgr = lnk->lgr;
  1354. int i, rc = 0;
  1355. for (i = 0; i < SMC_RMBE_SIZES; i++) {
  1356. rc = _smcr_buf_map_lgr(lnk, &lgr->rmbs_lock,
  1357. &lgr->rmbs[i], true);
  1358. if (rc)
  1359. return rc;
  1360. rc = _smcr_buf_map_lgr(lnk, &lgr->sndbufs_lock,
  1361. &lgr->sndbufs[i], false);
  1362. if (rc)
  1363. return rc;
  1364. }
  1365. return 0;
  1366. }
  1367. /* register all used buffers of lgr for a new link,
  1368. * must be called under lgr->llc_conf_mutex lock
  1369. */
  1370. int smcr_buf_reg_lgr(struct smc_link *lnk)
  1371. {
  1372. struct smc_link_group *lgr = lnk->lgr;
  1373. struct smc_buf_desc *buf_desc, *bf;
  1374. int i, rc = 0;
  1375. mutex_lock(&lgr->rmbs_lock);
  1376. for (i = 0; i < SMC_RMBE_SIZES; i++) {
  1377. list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
  1378. if (!buf_desc->used)
  1379. continue;
  1380. rc = smcr_link_reg_rmb(lnk, buf_desc);
  1381. if (rc)
  1382. goto out;
  1383. }
  1384. }
  1385. out:
  1386. mutex_unlock(&lgr->rmbs_lock);
  1387. return rc;
  1388. }
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
	buf_desc->len = bufsize;
	return buf_desc;
}

/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0;

	/* protect against parallel link reconfiguration */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (!smc_link_usable(lnk))
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			rc = -ENOMEM;
			goto out;
		}
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	return rc;
}

#define SMCD_DMBE_SIZES		6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
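
/* DMB size classes grow by powers of two, e.g. class 2 -> 64KB and
 * class 4 -> 256KB, so requests whose smc_compress_bufsize() result
 * exceeds SMCD_DMBE_SIZES (1MB) are rejected with -EAGAIN below.
 */

/* allocate a new DMB (is_dmb) by registering it with the ISM device,
 * otherwise a plain kzalloc'ed send buffer; for DMBs the usable length
 * is reduced by the CDC header that is stored inside the buffer
 */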
static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			if (rc == -ENOMEM)
				return ERR_PTR(-EAGAIN);
			if (rc == -ENOSPC)
				return ERR_PTR(-ENOSPC);
			return ERR_PTR(-EIO);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}
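
/* create one sndbuf or RMB for a connection: starting from the size class
 * derived from the socket buffer size, first try to reuse a free buffer of
 * the link group, otherwise allocate a new one, stepping down to smaller
 * size classes on allocation failure; for SMC-R the buffer is then mapped
 * on all usable links of the link group
 */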
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	struct mutex *lock;	/* lock buffer list */
	int sk_buf_size;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		mutex_lock(lock);
		list_add(&buf_desc->list, buf_list);
		mutex_unlock(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return PTR_ERR(buf_desc);

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smcr_buf_unuse(buf_desc, lgr);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}
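
/* DMA sync helpers: sndbuf syncs use DMA_TO_DEVICE on the connection's
 * current link, RMB syncs use DMA_FROM_DEVICE on every active link of the
 * link group; for SMC-D link groups the helpers return early
 */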
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&conn->lgr->lnk[i]))
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
					  DMA_FROM_DEVICE);
	}
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc) {
		mutex_lock(&smc->conn.lgr->sndbufs_lock);
		list_del(&smc->conn.sndbuf_desc->list);
		mutex_unlock(&smc->conn.lgr->sndbufs_lock);
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
		smc->conn.sndbuf_desc = NULL;
	}
	return rc;
}
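
/* lgr->rtokens[rtok_idx][link_idx] caches the peer's rkey and RMB DMA
 * address per link; rtokens_used_mask marks which rtoken rows are in use.
 * Reserve a free row, claiming it atomically via test_and_set_bit().
 */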
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}
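
/* find the rtoken row that holds the given rkey for the given link */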
static int smc_rtoken_find_by_link(struct smc_link_group *lgr, int lnk_idx,
				   u32 rkey)
{
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (test_bit(i, lgr->rtokens_used_mask) &&
		    lgr->rtokens[i][lnk_idx].rkey == rkey)
			return i;
	}
	return -ENOENT;
}

/* set rtoken for a new link to an existing rmb */
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey)
{
	int rtok_idx;

	rtok_idx = smc_rtoken_find_by_link(lgr, link_idx, ntohl(nw_rkey_known));
	if (rtok_idx == -ENOENT)
		return;
	lgr->rtokens[rtok_idx][link_idx_new].rkey = ntohl(nw_rkey);
	lgr->rtokens[rtok_idx][link_idx_new].dma_addr = be64_to_cpu(nw_vaddr);
}

/* set rtoken for a new link whose link_id is given */
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	bool found = false;
	int link_idx;

	for (link_idx = 0; link_idx < SMC_LINKS_PER_LGR_MAX; link_idx++) {
		if (lgr->lnk[link_idx].link_id == link_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return;
	lgr->rtokens[rtok_idx][link_idx].rkey = rkey;
	lgr->rtokens[rtok_idx][link_idx].dma_addr = dma_addr;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
					  clc->r0.rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}
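
/* mark all IB ports and all SMC-D devices as going away, presumably so
 * that no new link groups are set up on them during shutdown
 */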
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	mutex_unlock(&smc_ib_devices.mutex);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	mutex_unlock(&smcd_dev_list.mutex);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	mutex_unlock(&smcd_dev_list.mutex);
}
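
/* reboot notifier callback: shut down all link groups and unregister
 * from the IB core before the system goes down
 */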
static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};
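
/* register the reboot notifier so that link groups are cleaned up on reboot */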
int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}