// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
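
/* Illustrative user-space usage, not part of this file: an AF_SMC socket is
 * created like a TCP socket, with the protocol constant selecting IPv4 or
 * IPv6 addressing; if the peer turns out not to be SMC-capable, the
 * connection transparently falls back to TCP.
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC); // SMCPROTO_SMC6: IPv6
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));  // sin: sockaddr_in
 */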
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
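
/* propagate SO_KEEPALIVE to the internal CLC (TCP) socket, which carries
 * the transport state needed for keepalive probing
 */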
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
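
/* add an smc sock to the protocol hash table and bump the per-netns inuse
 * counter; smc_unhash_sk() below is the inverse operation
 */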
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
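
/* on release of a fallback socket, undo the file->private_data redirection
 * established by smc_switch_to_fallback(), so the file points at the smc
 * socket again
 */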
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
	}
}
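
/* common release work: terminate the SMC connection (or shut down the
 * fallback TCP socket), unhash the sock, and free connection resources
 */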
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}
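
/* socket layer ->release() entry point for AF_SMC sockets */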
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int old_state, rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	old_state = sk->sk_state;

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && old_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
	    !smc->use_fallback)
		smc_close_active_abort(smc);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
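
/* allocate and initialize an smc sock, including its embedded connection
 * state and the work items used by the handshake and tx paths
 */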
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}

static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
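
/* copy the subset of sock fields and flags given by @mask from one sock to
 * the other; used to keep the smc sock and its internal clcsock in sync
 */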
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_rmb()
	 */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}

	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	rmb_desc->is_conf_rkey = true;
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}
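
/* client side of the CONFIRM LINK handshake for the first link of a new
 * link group: wait for the server's CONFIRM LINK, bring the QP to RTS,
 * register the RMB, respond, and optionally process an ADD LINK request
 */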
static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* receive CONFIRM LINK request from server over RoCE fabric */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* optional 2nd link, receive ADD LINK request from server */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_ADD_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		if (rc == -EAGAIN)
			rc = 0; /* no DECLINE received, go with one link */
		return rc;
	}
	smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
	smc_llc_cli_add_link(link, qentry);
	return 0;
}
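
/* save the peer's SMC-R buffer parameters from the CLC accept message:
 * RMB element index, alert token and (decompressed) RMBE size
 */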
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = clc->d0.token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
	if (clc->hdr.version > SMC_V1 &&
	    (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)clc;
		struct smc_clc_first_contact_ext *fce =
			(struct smc_clc_first_contact_ext *)
				(((u8 *)clc_v2) + sizeof(*clc_v2));

		memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
		       SMC_MAX_EID_LEN);
		smc->conn.lgr->peer_os = fce->os_type;
		smc->conn.lgr->peer_smc_release = fce->release;
		if (smc_isascii(fce->hostname))
			memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
			       SMC_MAX_HOSTNAME_LEN);
	}
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}
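
/* switch an smc sock to plain TCP: redirect the file to the clcsock and
 * migrate fasync entries and wait queue entries, so that user space keeps
 * polling the socket that now carries the traffic
 */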
static void smc_switch_to_fallback(struct smc_sock *smc)
{
	wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
	wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
	unsigned long flags;

	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;

		/* There may be some entries remaining in
		 * smc socket->wq, which should be removed
		 * to clcsocket->wq during the fallback.
		 */
		spin_lock_irqsave(&smc_wait->lock, flags);
		spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
		list_splice_init(&smc_wait->head, &clc_wait->head);
		spin_unlock(&clc_wait->lock);
		spin_unlock_irqrestore(&smc_wait->lock, flags);
	}
}

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

/* abort connecting */
static void smc_connect_abort(struct smc_sock *smc, int local_first)
{
	if (local_first)
		smc_lgr_cleanup_early(&smc->conn);
	else
		smc_conn_free(&smc->conn);
}

/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;
	else
		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}

/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}

/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */
	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			ini->is_smcd = true;
			rc = 0;
			i++;
			if (i > SMC_MAX_ISM_DEVS)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (ini->smcd_version & SMC_V1) {
		if (smc_find_ism_device(smc, ini) ||
		    smc_connect_ism_vlan_setup(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_R;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else ISM V1 is supported for this connection */
		if (smc_find_rdma_device(smc, ini)) {
			if (ini->smc_type_v1 == SMC_TYPE_B)
				ini->smc_type_v1 = SMC_TYPE_D;
			else
				ini->smc_type_v1 = SMC_TYPE_N;
		} /* else RDMA is supported for this connection */
	}
	if (smc_ism_v2_capable && smc_find_ism_v2_device_clnt(smc, ini))
		ini->smc_type_v2 = SMC_TYPE_N;

	/* if neither ISM nor RDMA are supported, fallback */
	if (!smcr_indicated(ini->smc_type_v1) &&
	    ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm_v2) + \
	 sizeof(struct smc_clc_first_contact_ext) + \
	 sizeof(struct smc_clc_msg_trail))

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm_v2 *aclc2,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}

/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->r0.lcl;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				    sizeof(l->peer_mac))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc->conn.lnk = link;
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGRMB;
			goto connect_abort;
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   SMC_V1);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_connect_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}

/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
			       struct smc_init_info *ini)
{
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	if (aclc->hdr.version == SMC_V2) {
		struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
		if (rc)
			return rc;
	}
	ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	rc = smc_buf_create(smc, true);
	if (rc) {
		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
				  aclc->hdr.version);
	if (rc)
		goto connect_abort;
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_connect_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_server_lgr_pending);
	smc->connect_nonblock = 0;

	return rc;
}

/* check if received accept type and version matches a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if ((aclc->hdr.typev1 == SMC_TYPE_R &&
	     !smcr_indicated(ini->smc_type_v1)) ||
	    (aclc->hdr.typev1 == SMC_TYPE_D &&
	     ((!smcd_indicated(ini->smc_type_v1) &&
	       !smcd_indicated(ini->smc_type_v2)) ||
	      (aclc->hdr.version == SMC_V1 &&
	       !smcd_indicated(ini->smc_type_v1)) ||
	      (aclc->hdr.version == SMC_V2 &&
	       !smcd_indicated(ini->smc_type_v2)))))
		return SMC_CLC_DECL_MODEUNSUPP;

	return 0;
}

/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
	struct smc_clc_msg_accept_confirm_v2 *aclc2;
	struct smc_clc_msg_accept_confirm *aclc;
	struct smc_init_info *ini = NULL;
	u8 *buf = NULL;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
						    version);

	ini->smcd_version = SMC_V1;
	ini->smcd_version |= smc_ism_v2_capable ? SMC_V2 : 0;
	ini->smc_type_v1 = SMC_TYPE_B;
	ini->smc_type_v2 = smc_ism_v2_capable ? SMC_TYPE_D : SMC_TYPE_N;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
		ini->smcd_version &= ~SMC_V1;
		ini->smc_type_v1 = SMC_TYPE_N;
		if (!ini->smcd_version) {
			rc = SMC_CLC_DECL_GETVLANERR;
			goto fallback;
		}
	}

	rc = smc_find_proposal_devices(smc, ini);
	if (rc)
		goto fallback;

	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto fallback;
	}
	aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
	aclc = (struct smc_clc_msg_accept_confirm *)aclc2;

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, aclc2, ini);
	if (rc)
		goto vlan_cleanup;

	/* check if smc modes and versions of CLC proposal and accept match */
	rc = smc_connect_check_aclc(ini, aclc);
	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
	ini->smcd_version = version;
	if (rc)
		goto vlan_cleanup;

	/* depending on previous steps, connect using rdma or ism */
	if (aclc->hdr.typev1 == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, aclc, ini);
	else if (aclc->hdr.typev1 == SMC_TYPE_D)
		rc = smc_connect_ism(smc, aclc, ini);
	if (rc)
		goto vlan_cleanup;

	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
	kfree(ini);
	return 0;

vlan_cleanup:
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
fallback:
	kfree(ini);
	return smc_connect_decline_fallback(smc, rc, version);
}
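
/* worker for a non-blocking connect: wait until the TCP handshake on the
 * clcsock has finished, then run the SMC handshake via __smc_connect()
 */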
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}
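
/* socket layer ->connect() entry point: validate the address, connect the
 * internal TCP socket, and run the SMC handshake either inline or, for
 * O_NONBLOCK sockets, deferred to smc_connect_work()
 */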
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	sock_hold(&smc->sk); /* sock put in passive closing */
	if (smc->use_fallback)
		goto out;
	if (flags & O_NONBLOCK) {
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
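
/* accept a pending connection on the internal CLC socket and allocate a new
 * smc sock for it; the nonblocking kernel_accept() returns -EAGAIN when no
 * connection is pending
 */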
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0 && rc != -EAGAIN)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}
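
/* server side of the CONFIRM LINK handshake for the first link of a new
 * link group: register the RMB, send CONFIRM LINK, wait for the client's
 * response, and try to establish a second link
 */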
static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* initial contact - try to establish second link */
	smc_llc_srv_add_link(link);
	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_first)
		smc_lgr_cleanup_early(&new_smc->conn);
	else
		smc_conn_free(&new_smc->conn);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: version checking */
static int smc_listen_v2_check(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
	struct smc_clc_v2_extension *pclc_v2_ext;

	ini->smc_type_v1 = pclc->hdr.typev1;
	ini->smc_type_v2 = pclc->hdr.typev2;
	ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
	if (pclc->hdr.version > SMC_V1)
		ini->smcd_version |=
				ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
	if (!smc_ism_v2_capable) {
		ini->smcd_version &= ~SMC_V2;
		goto out;
	}
	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		goto out;
	}
	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
	if (!pclc_smcd_v2_ext)
		ini->smcd_version &= ~SMC_V2;

out:
	if (!ini->smcd_version) {
		if (pclc->hdr.typev1 == SMC_TYPE_B ||
		    pclc->hdr.typev2 == SMC_TYPE_B)
			return SMC_CLC_DECL_NOSMCDEV;
		if (pclc->hdr.typev1 == SMC_TYPE_D ||
		    pclc->hdr.typev2 == SMC_TYPE_D)
			return SMC_CLC_DECL_NOSMCDDEV;
		return SMC_CLC_DECL_NOSMCRDEV;
	}

	return 0;
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		if (ini->first_contact_local)
			smc_lgr_cleanup_early(&new_smc->conn);
		else
			smc_conn_free(&new_smc->conn);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}
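
/* helper: was this ISM device already selected into ini->ism_dev[]? */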
static bool smc_is_already_selected(struct smcd_dev *smcd,
				    struct smc_init_info *ini,
				    int matches)
{
	int i;

	for (i = 0; i < matches; i++)
		if (smcd == ini->ism_dev[i])
			return true;

	return false;
}

/* check for ISM devices matching proposed ISM devices */
static void smc_check_ism_v2_match(struct smc_init_info *ini,
				   u16 proposed_chid, u64 proposed_gid,
				   unsigned int *matches)
{
	struct smcd_dev *smcd;

	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away)
			continue;
		if (smc_is_already_selected(smcd, ini, *matches))
			continue;
		if (smc_ism_get_chid(smcd) == proposed_chid &&
		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
			ini->ism_peer_gid[*matches] = proposed_gid;
			ini->ism_dev[*matches] = smcd;
			(*matches)++;
			break;
		}
	}
}
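
/* listen worker: select a usable V2 ISM device matching one of the devices
 * proposed by the client; on failure, clear the V2 indication in ini
 */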
static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_v2_extension *smc_v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	unsigned int matches = 0;
	u8 smcd_version;
	u8 *eid = NULL;
	int i;

	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
		goto not_found;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
	if (!smcd_v2_ext ||
	    !smc_v2_ext->hdr.flag.seid) /* no system EID support for SMCD */
		goto not_found;

	mutex_lock(&smcd_dev_list.mutex);
	if (pclc_smcd->ism.chid)
		/* check for ISM device matching proposed native ISM device */
		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
				       ntohll(pclc_smcd->ism.gid), &matches);
	for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smc_check_ism_v2_match(ini,
				       ntohs(smcd_v2_ext->gidchid[i - 1].chid),
				       ntohll(smcd_v2_ext->gidchid[i - 1].gid),
				       &matches);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (ini->ism_dev[0]) {
		smc_ism_get_system_eid(ini->ism_dev[0], &eid);
		if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
			goto not_found;
	} else {
		goto not_found;
	}

	/* separate - outside the smcd_dev_list.lock */
	smcd_version = ini->smcd_version;
	for (i = 0; i < matches; i++) {
		ini->smcd_version = SMC_V2;
		ini->is_smcd = true;
		ini->ism_selected = i;
		if (smc_listen_ism_init(new_smc, ini))
			/* try next active ISM device */
			continue;
		return; /* matching and usable V2 ISM device found */
	}
	/* no V2 ISM device could be initialized */
	ini->smcd_version = smcd_version;	/* restore original value */

not_found:
	ini->smcd_version &= ~SMC_V2;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}
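
/* listen worker: as the server, find a V1 ISM device matching the single
 * GID carried in the proposal
 */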
static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);

	/* check if ISM V1 is available */
	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
		goto not_found;
	ini->is_smcd = true; /* prepare ISM check */
	ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
	if (smc_find_ism_device(new_smc, ini))
		goto not_found;
	ini->ism_selected = 0;
	if (!smc_listen_ism_init(new_smc, ini))
		return;		/* V1 ISM device found */

not_found:
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}
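
/* listen worker: as the server, find an RDMA device for a V1 proposal and
 * set up connection, buffers and buffer registration
 */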
static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	int rc;

	if (!smcr_indicated(ini->smc_type_v1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* prepare RDMA check */
	ini->ib_lcl = &pclc->lcl;
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		/* no RDMA device found */
		if (ini->smc_type_v1 == SMC_TYPE_B)
			/* neither ISM nor RDMA device found */
			rc = SMC_CLC_DECL_NOSMCDEV;
		return rc;
	}
	rc = smc_listen_rdma_init(new_smc, ini);
	if (rc)
		return rc;
	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
}

/* determine the local device matching to proposal */
static int smc_listen_find_device(struct smc_sock *new_smc,
				  struct smc_clc_msg_proposal *pclc,
				  struct smc_init_info *ini)
{
	int rc;

	/* check for ISM device matching V2 proposed device */
	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (!(ini->smcd_version & SMC_V1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		return rc;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
		return SMC_CLC_DECL_GETVLANERR;

	/* check for ISM device matching V1 proposed device */
	smc_find_ism_v1_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (pclc->hdr.typev1 == SMC_TYPE_D)
		return SMC_CLC_DECL_NOSMCDDEV; /* skip RDMA and decline */

	/* check if RDMA is available */
	return smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}
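
/*
 * Server-side CLC handshake driven by the listen worker below:
 * receive SMC Proposal, select an ISM or RoCE device, send SMC Accept,
 * then wait for SMC Confirm. The smc_server_lgr_pending mutex serializes
 * link group creation from device selection until the connection is
 * usable; for SMC-D it is dropped right after the Accept was sent.
 */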
/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm *cclc;
	struct smc_clc_msg_proposal_area *buf;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info *ini = NULL;
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}
	pclc = (struct smc_clc_msg_proposal *)buf;
	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;
	version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}

	/* initial version checking */
	rc = smc_listen_v2_check(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* determine ISM or RoCE device used for connection */
	rc = smc_listen_find_device(new_smc, pclc, ini);
	if (rc)
		goto out_unlock;

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
				 ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ini->is_smcd)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	memset(buf, 0, sizeof(*buf));
	cclc = (struct smc_clc_msg_accept_confirm *)buf;
	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ini->is_smcd) {
		rc = smc_listen_rdma_finish(new_smc, cclc,
					    ini->first_contact_local);
		if (rc)
			goto out_unlock;
		mutex_unlock(&smc_server_lgr_pending);
	}
	smc_conn_save_peer_info(new_smc, cclc);
	smc_listen_out_connected(new_smc);
	goto out_free;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
			   version);
out_free:
	kfree(ini);
	kfree(buf);
}
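
/* worker loop on the listen socket: accept queued clcsock connections and
 * hand each new child socket to its own smc_listen_work instance
 */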
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc) /* clcsock accept queue empty or error */
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
}
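
/* data-ready callback installed on the listen clcsock; sk_user_data carries
 * the owning smc_sock tagged with SK_USER_DATA_NOCOPY, so the flag bit is
 * masked off before the pointer is used
 */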
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc;

	lsmc = (struct smc_sock *)
	       ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
	if (!lsmc)
		return;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
	smc->clcsock->sk->sk_user_data =
		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	rc = kernel_listen(smc->clcsock, backlog);
	if (rc) {
		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
		goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;

out:
	release_sock(sk);
	return rc;
}

static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
					 MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}
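
/* poll helper: report EPOLLIN on a listen socket with queued connections */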
static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK) {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk);
		}
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}

static int smc_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	if (level == SOL_TCP && optname == TCP_ULP)
		return -EOPNOTSUPP;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	if (unlikely(!smc->clcsock->ops->setsockopt))
		rc = -EOPNOTSUPP;
	else
		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
						   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}

	if (optlen < sizeof(int))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val)
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val)
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}

out:
	release_sock(sk);
	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt))
		return -EOPNOTSUPP;
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
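
/* ioctl: answer TCP-style queue queries (SIOCINQ, SIOCOUTQ, SIOCOUTQNSD,
 * SIOCATMARK) from the SMC connection cursors; in fallback mode the
 * request is delegated to the clcsock
 */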
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
			       atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}

static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}
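
/*
 * User space creates AF_SMC sockets exactly like TCP sockets. A minimal
 * sketch (not part of this file; assumes AF_SMC and SMCPROTO_SMC as
 * exported by the installed kernel headers):
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *
 * bind()/connect()/listen() then behave as for TCP; the CLC handshake
 * runs over the internal clcsock created above, which also serves as
 * the fallback path when SMC cannot be used.
 */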
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};
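
/*
 * Module init registers components in dependency order: pernet subsystem,
 * pnet tables, handshake and close workqueues, core, LLC, CDC, the v4/v6
 * protos, the socket family, and finally the IB client. Each error path
 * unwinds everything registered so far in reverse order.
 */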
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	smc_ism_init();
	smc_clc_init();

	rc = smc_pnet_init();
	if (rc)
		goto out_pernet_subsys;

	rc = -ENOMEM;
	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_pnet;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_pnet:
	smc_pnet_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);
MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);