smc_core.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for SMC Connections, Link Groups and Links
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#ifndef _SMC_CORE_H
#define _SMC_CORE_H

#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_ib.h"

#define SMC_RMBS_PER_LGR_MAX	255	/* max. # of RMBs per link group */

struct smc_lgr_list {			/* list of link group definition */
	struct list_head	list;
	spinlock_t		lock;	/* protects list of link groups */
	u32			num;	/* unique link group number */
};

enum smc_lgr_role {		/* possible roles of a link group */
	SMC_CLNT,	/* client */
	SMC_SERV	/* server */
};

enum smc_link_state {			/* possible states of a link */
	SMC_LNK_UNUSED,			/* link is unused */
	SMC_LNK_INACTIVE,		/* link is inactive */
	SMC_LNK_ACTIVATING,		/* link is being activated */
	SMC_LNK_ACTIVE,			/* link is active */
};

#define SMC_WR_BUF_SIZE		48	/* size of work request buffer */

struct smc_wr_buf {
	u8	raw[SMC_WR_BUF_SIZE];
};

#define SMC_WR_REG_MR_WAIT_TIME	(5 * HZ)	/* wait time for ib_wr_reg_mr result */

enum smc_wr_reg_state {
	POSTED,		/* ib_wr_reg_mr request posted */
	CONFIRMED,	/* ib_wr_reg_mr response: successful */
	FAILED		/* ib_wr_reg_mr response: failure */
};

struct smc_rdma_sge {			/* sges for RDMA writes */
	struct ib_sge	wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE];
};

#define SMC_MAX_RDMA_WRITES	2	/* max. # of RDMA writes per
					 * message send
					 */

struct smc_rdma_sges {			/* sges per message send */
	struct smc_rdma_sge	tx_rdma_sge[SMC_MAX_RDMA_WRITES];
};

struct smc_rdma_wr {			/* work requests per message
					 * send
					 */
	struct ib_rdma_wr	wr_tx_rdma[SMC_MAX_RDMA_WRITES];
};

#define SMC_LGR_ID_SIZE		4

struct smc_link {
	struct smc_ib_device	*smcibdev;	/* ib-device */
	u8			ibport;		/* port - values 1 | 2 */
	struct ib_pd		*roce_pd;	/* IB protection domain,
						 * unique for every RoCE QP
						 */
	struct ib_qp		*roce_qp;	/* IB queue pair */
	struct ib_qp_attr	qp_attr;	/* IB queue pair attributes */
	struct smc_wr_buf	*wr_tx_bufs;	/* WR send payload buffers */
	struct ib_send_wr	*wr_tx_ibs;	/* WR send meta data */
	struct ib_sge		*wr_tx_sges;	/* WR send gather meta data */
	struct smc_rdma_sges	*wr_tx_rdma_sges; /* RDMA WRITE gather meta data */
	struct smc_rdma_wr	*wr_tx_rdmas;	/* WR RDMA WRITE */
	struct smc_wr_tx_pend	*wr_tx_pends;	/* WR send waiting for CQE */
	struct completion	*wr_tx_compl;	/* WR send CQE completion */
	/* above send vectors have wr_tx_cnt elements and use the same index */
	dma_addr_t		wr_tx_dma_addr;	/* DMA address of wr_tx_bufs */
	atomic_long_t		wr_tx_id;	/* seq # of last sent WR */
	unsigned long		*wr_tx_mask;	/* bit mask of used indexes */
	u32			wr_tx_cnt;	/* number of WR send buffers */
	wait_queue_head_t	wr_tx_wait;	/* wait for free WR send buf */
	atomic_t		wr_tx_refcnt;	/* tx refs to link */
	struct smc_wr_buf	*wr_rx_bufs;	/* WR recv payload buffers */
	struct ib_recv_wr	*wr_rx_ibs;	/* WR recv meta data */
	struct ib_sge		*wr_rx_sges;	/* WR recv scatter meta data */
	/* above three vectors have wr_rx_cnt elements and use the same index */
	dma_addr_t		wr_rx_dma_addr;	/* DMA address of wr_rx_bufs */
	u64			wr_rx_id;	/* seq # of last recv WR */
	u32			wr_rx_cnt;	/* number of WR recv buffers */
	unsigned long		wr_rx_tstamp;	/* jiffies when last buf rx */
	struct ib_reg_wr	wr_reg;		/* WR register memory region */
	wait_queue_head_t	wr_reg_wait;	/* wait for wr_reg result */
	atomic_t		wr_reg_refcnt;	/* reg refs to link */
	enum smc_wr_reg_state	wr_reg_state;	/* state of wr_reg request */
	u8			gid[SMC_GID_SIZE]; /* gid matching used vlan id */
	u8			sgid_index;	/* gid index for vlan id */
	u32			peer_qpn;	/* QP number of peer */
	enum ib_mtu		path_mtu;	/* used mtu */
	enum ib_mtu		peer_mtu;	/* mtu size of peer */
	u32			psn_initial;	/* QP tx initial packet seqno */
	u32			peer_psn;	/* QP rx initial packet seqno */
	u8			peer_mac[ETH_ALEN];	/* = gid[8:10||13:15] */
	u8			peer_gid[SMC_GID_SIZE];	/* gid of peer */
	u8			link_id;	/* unique # within link group */
	u8			link_uid[SMC_LGR_ID_SIZE];	/* unique lnk id */
	u8			peer_link_uid[SMC_LGR_ID_SIZE];	/* peer uid */
	u8			link_idx;	/* index in lgr link array */
	u8			link_is_asym;	/* is link asymmetric? */
	struct smc_link_group	*lgr;		/* parent link group */
	struct work_struct	link_down_wrk;	/* wrk to bring link down */
	enum smc_link_state	state;		/* state of link */
	struct delayed_work	llc_testlink_wrk;  /* testlink worker */
	struct completion	llc_testlink_resp; /* wait for rx of testlink */
	int			llc_testlink_time; /* testlink interval */
};

/* The SMC protocol allows up to 8 parallel links per link group; this
 * implementation limits the per-lgr link array to SMC_LINKS_PER_LGR_MAX.
 */
#define SMC_LINKS_PER_LGR_MAX	3
#define SMC_SINGLE_LINK		0

/* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
struct smc_buf_desc {
	struct list_head	list;
	void			*cpu_addr;	/* virtual address of buffer */
	struct page		*pages;
	int			len;		/* length of buffer */
	u32			used;		/* currently used / unused */
	union {
		struct { /* SMC-R */
			struct sg_table	sgt[SMC_LINKS_PER_LGR_MAX];
					/* virtual buffer */
			struct ib_mr	*mr_rx[SMC_LINKS_PER_LGR_MAX];
					/* for rmb only: memory region
					 * incl. rkey provided to peer
					 */
			u32		order;	/* allocation order */
			u8		is_conf_rkey;
					/* confirm_rkey done */
			u8		is_reg_mr[SMC_LINKS_PER_LGR_MAX];
					/* mem region registered */
			u8		is_map_ib[SMC_LINKS_PER_LGR_MAX];
					/* mem region mapped to lnk */
			u8		is_reg_err;
					/* buffer registration err */
		};
		struct { /* SMC-D */
			unsigned short	sba_idx;
					/* SBA index number */
			u64		token;
					/* DMB token number */
			dma_addr_t	dma_addr;
					/* DMA address */
		};
	};
};

struct smc_rtoken {			/* address/key of remote RMB */
	u64	dma_addr;
	u32	rkey;
};

#define SMC_BUF_MIN_SIZE	16384	/* minimum size of an RMB */
#define SMC_RMBE_SIZES		16	/* number of distinct RMBE sizes */
/* theoretically, the RFC states that largest size would be 512K,
 * i.e. compressed 5 and thus 6 sizes (0..5), despite
 * struct smc_clc_msg_accept_confirm.rmbe_size being a 4 bit value (0..15)
 */
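
/* Illustrative sketch only, not part of the original header: assuming the
 * compressed RMBE size encodes a power-of-two multiple of SMC_BUF_MIN_SIZE
 * (the real expansion helper is smc_uncompress_bufsize(), declared below and
 * defined in smc_core.c), compressed value 0 maps to 16K and value 5 to
 * 16384 << 5 = 524288 bytes, i.e. the 512K maximum mentioned above.
 */
static inline int smc_rmbe_size_sketch(u8 compressed)
{
	return SMC_BUF_MIN_SIZE << compressed;	/* 0 -> 16K, ..., 5 -> 512K */
}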

struct smcd_dev;

enum smc_lgr_type {			/* redundancy state of lgr */
	SMC_LGR_NONE,			/* no active links, lgr to be deleted */
	SMC_LGR_SINGLE,			/* 1 active RNIC on each peer */
	SMC_LGR_SYMMETRIC,		/* 2 active RNICs on each peer */
	SMC_LGR_ASYMMETRIC_PEER,	/* local has 2, peer 1 active RNICs */
	SMC_LGR_ASYMMETRIC_LOCAL,	/* local has 1, peer 2 active RNICs */
};

enum smc_llc_flowtype {
	SMC_LLC_FLOW_NONE	= 0,
	SMC_LLC_FLOW_ADD_LINK	= 2,
	SMC_LLC_FLOW_DEL_LINK	= 4,
	SMC_LLC_FLOW_RKEY	= 6,
};

struct smc_llc_qentry;

struct smc_llc_flow {
	enum smc_llc_flowtype type;
	struct smc_llc_qentry *qentry;
};

struct smc_link_group {
	struct list_head	list;
	struct rb_root		conns_all;	/* connection tree */
	rwlock_t		conns_lock;	/* protects conns_all */
	unsigned int		conns_num;	/* current # of connections */
	unsigned short		vlan_id;	/* vlan id of link group */
	struct list_head	sndbufs[SMC_RMBE_SIZES];/* tx buffers */
	struct mutex		sndbufs_lock;	/* protects tx buffers */
	struct list_head	rmbs[SMC_RMBE_SIZES];	/* rx buffers */
	struct mutex		rmbs_lock;	/* protects rx buffers */
	u8			id[SMC_LGR_ID_SIZE];	/* unique lgr id */
	struct delayed_work	free_work;	/* delayed freeing of an lgr */
	struct work_struct	terminate_work;	/* abnormal lgr termination */
	struct workqueue_struct	*tx_wq;		/* wq for conn. tx workers */
	u8			sync_err : 1;	/* lgr no longer fits to peer */
	u8			terminating : 1;/* lgr is terminating */
	u8			freeing : 1;	/* lgr is being freed */
	bool			is_smcd;	/* SMC-R or SMC-D */
	u8			smc_version;
	u8			negotiated_eid[SMC_MAX_EID_LEN];
	u8			peer_os;	/* peer operating system */
	u8			peer_smc_release;
	u8			peer_hostname[SMC_MAX_HOSTNAME_LEN];
	union {
		struct { /* SMC-R */
			enum smc_lgr_role	role;
						/* client or server */
			struct smc_link		lnk[SMC_LINKS_PER_LGR_MAX];
						/* smc link */
			char			peer_systemid[SMC_SYSTEMID_LEN];
						/* unique system_id of peer */
			struct smc_rtoken	rtokens[SMC_RMBS_PER_LGR_MAX]
						       [SMC_LINKS_PER_LGR_MAX];
						/* remote addr/key pairs */
			DECLARE_BITMAP(rtokens_used_mask, SMC_RMBS_PER_LGR_MAX);
						/* used rtoken elements */
			u8			next_link_id;
			enum smc_lgr_type	type;
						/* redundancy state */
			u8			pnet_id[SMC_MAX_PNETID_LEN + 1];
						/* pnet id of this lgr */
			struct list_head	llc_event_q;
						/* queue for llc events */
			spinlock_t		llc_event_q_lock;
						/* protects llc_event_q */
			struct mutex		llc_conf_mutex;
						/* protects lgr reconfig. */
			struct work_struct	llc_add_link_work;
			struct work_struct	llc_del_link_work;
			struct work_struct	llc_event_work;
						/* llc event worker */
			wait_queue_head_t	llc_flow_waiter;
						/* w4 next llc event */
			wait_queue_head_t	llc_msg_waiter;
						/* w4 next llc msg */
			struct smc_llc_flow	llc_flow_lcl;
						/* llc local control field */
			struct smc_llc_flow	llc_flow_rmt;
						/* llc remote control field */
			struct smc_llc_qentry	*delayed_event;
						/* arrived when flow active */
			spinlock_t		llc_flow_lock;
						/* protects llc flow */
			int			llc_testlink_time;
						/* link keep alive time */
			u32			llc_termination_rsn;
						/* rsn code for termination */
		};
		struct { /* SMC-D */
			u64			peer_gid;
						/* Peer GID (remote) */
			struct smcd_dev		*smcd;
						/* ISM device for VLAN reg. */
			u8			peer_shutdown : 1;
						/* peer triggered shutdown */
		};
	};
};

struct smc_clc_msg_local;

struct smc_init_info {
	u8			is_smcd;
	u8			smc_type_v1;
	u8			smc_type_v2;
	u8			first_contact_peer;
	u8			first_contact_local;
	unsigned short		vlan_id;
	/* SMC-R */
	struct smc_clc_msg_local *ib_lcl;
	struct smc_ib_device	*ib_dev;
	u8			ib_gid[SMC_GID_SIZE];
	u8			ib_port;
	u32			ib_clcqpn;
	/* SMC-D */
	u64			ism_peer_gid[SMC_MAX_ISM_DEVS + 1];
	struct smcd_dev		*ism_dev[SMC_MAX_ISM_DEVS + 1];
	u16			ism_chid[SMC_MAX_ISM_DEVS + 1];
	u8			ism_offered_cnt; /* # of ISM devices offered */
	u8			ism_selected;	 /* index of selected ISM dev */
	u8			smcd_version;
};

/* Find the connection associated with the given alert token in the link group.
 * To use rbtrees we have to implement our own search core.
 * Requires @conns_lock
 * @token	alert token to search for
 * @lgr		link group to search in
 * Returns connection associated with token if found, NULL otherwise.
 */
static inline struct smc_connection *smc_lgr_find_conn(
	u32 token, struct smc_link_group *lgr)
{
	struct smc_connection *res = NULL;
	struct rb_node *node;

	node = lgr->conns_all.rb_node;
	while (node) {
		struct smc_connection *cur = rb_entry(node,
					struct smc_connection, alert_node);

		if (cur->alert_token_local > token) {
			node = node->rb_left;
		} else {
			if (cur->alert_token_local < token) {
				node = node->rb_right;
			} else {
				res = cur;
				break;
			}
		}
	}
	return res;
}
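
/* Illustrative caller sketch, not part of the original header: per the
 * comment above, smc_lgr_find_conn() requires @conns_lock, the rwlock that
 * protects conns_all. A reader-side lookup under that lock could look as
 * follows (the function name is hypothetical).
 */
static inline bool smc_lgr_conn_exists_sketch(u32 token,
					      struct smc_link_group *lgr)
{
	bool found;

	read_lock_bh(&lgr->conns_lock);		/* reader side of conns_lock */
	found = smc_lgr_find_conn(token, lgr) != NULL;
	read_unlock_bh(&lgr->conns_lock);
	return found;
}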

/* returns true if the specified link is usable */
static inline bool smc_link_usable(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE)
		return false;
	return true;
}

static inline bool smc_link_sendable(struct smc_link *lnk)
{
	return smc_link_usable(lnk) &&
		lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
}

static inline bool smc_link_active(struct smc_link *lnk)
{
	return lnk->state == SMC_LNK_ACTIVE;
}

struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;

void smc_lgr_cleanup_early(struct smc_connection *conn);
void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport);
void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
			unsigned short vlan);
void smc_smcd_terminate_all(struct smcd_dev *dev);
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev);
int smc_buf_create(struct smc_sock *smc, bool is_smcd);
int smc_uncompress_bufsize(u8 compressed);
int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_link *link,
			    struct smc_clc_msg_accept_confirm *clc);
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey);
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey);
void smc_rtoken_set(struct smc_link_group *lgr, int link_idx, int link_idx_new,
		    __be32 nw_rkey_known, __be64 nw_vaddr, __be32 nw_rkey);
void smc_rtoken_set2(struct smc_link_group *lgr, int rtok_idx, int link_id,
		     __be64 nw_vaddr, __be32 nw_rkey);
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn);
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn);
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn);
void smc_rmb_sync_sg_for_device(struct smc_connection *conn);
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);

void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
int smc_core_init(void);
void smc_core_exit(void);

int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
		   u8 link_idx, struct smc_init_info *ini);
void smcr_link_clear(struct smc_link *lnk, bool log);
int smcr_buf_map_lgr(struct smc_link *lnk);
int smcr_buf_reg_lgr(struct smc_link *lnk);
void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type);
void smcr_lgr_set_type_asym(struct smc_link_group *lgr,
			    enum smc_lgr_type new_type, int asym_lnk_idx);
int smcr_link_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc);
struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
				  struct smc_link *from_lnk, bool is_dev_err);
void smcr_link_down_cond(struct smc_link *lnk);
void smcr_link_down_cond_sched(struct smc_link *lnk);

static inline struct smc_link_group *smc_get_lgr(struct smc_link *link)
{
	return link->lgr;
}
#endif