/*
 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_QP_H
#define MLX4_QP_H

#include <linux/types.h>
#include <linux/if_ether.h>

#include <linux/mlx4/device.h>

#define MLX4_INVALID_LKEY	0x100

enum mlx4_qp_optpar {
	MLX4_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX4_QP_OPTPAR_RRE			= 1 << 1,
	MLX4_QP_OPTPAR_RAE			= 1 << 2,
	MLX4_QP_OPTPAR_RWE			= 1 << 3,
	MLX4_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX4_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX4_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX4_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX4_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX4_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX4_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX4_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX4_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX4_QP_OPTPAR_SCHED_QUEUE		= 1 << 16,
	MLX4_QP_OPTPAR_COUNTER_INDEX		= 1 << 20,
	MLX4_QP_OPTPAR_VLAN_STRIPPING		= 1 << 21,
};

enum mlx4_qp_state {
	MLX4_QP_STATE_RST			= 0,
	MLX4_QP_STATE_INIT			= 1,
	MLX4_QP_STATE_RTR			= 2,
	MLX4_QP_STATE_RTS			= 3,
	MLX4_QP_STATE_SQER			= 4,
	MLX4_QP_STATE_SQD			= 5,
	MLX4_QP_STATE_ERR			= 6,
	MLX4_QP_STATE_SQ_DRAINING		= 7,
	MLX4_QP_NUM_STATE
};

enum {
	MLX4_QP_ST_RC				= 0x0,
	MLX4_QP_ST_UC				= 0x1,
	MLX4_QP_ST_RD				= 0x2,
	MLX4_QP_ST_UD				= 0x3,
	MLX4_QP_ST_XRC				= 0x6,
	MLX4_QP_ST_MLX				= 0x7
};

enum {
	MLX4_QP_PM_MIGRATED			= 0x3,
	MLX4_QP_PM_ARMED			= 0x0,
	MLX4_QP_PM_REARM			= 0x1
};

enum {
	/* params1 */
	MLX4_QP_BIT_SRE				= 1 << 15,
	MLX4_QP_BIT_SWE				= 1 << 14,
	MLX4_QP_BIT_SAE				= 1 << 13,
	/* params2 */
	MLX4_QP_BIT_RRE				= 1 << 15,
	MLX4_QP_BIT_RWE				= 1 << 14,
	MLX4_QP_BIT_RAE				= 1 << 13,
	MLX4_QP_BIT_FPP				= 1 << 3,
	MLX4_QP_BIT_RIC				= 1 << 4,
};

enum {
	MLX4_RSS_HASH_XOR			= 0,
	MLX4_RSS_HASH_TOP			= 1,

	MLX4_RSS_UDP_IPV6			= 1 << 0,
	MLX4_RSS_UDP_IPV4			= 1 << 1,
	MLX4_RSS_TCP_IPV6			= 1 << 2,
	MLX4_RSS_IPV6				= 1 << 3,
	MLX4_RSS_TCP_IPV4			= 1 << 4,
	MLX4_RSS_IPV4				= 1 << 5,

	MLX4_RSS_BY_OUTER_HEADERS		= 0 << 6,
	MLX4_RSS_BY_INNER_HEADERS		= 2 << 6,
	MLX4_RSS_BY_INNER_HEADERS_IPONLY	= 3 << 6,

	/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
	MLX4_RSS_OFFSET_IN_QPC_PRI_PATH		= 0x24,
	/* offset of being RSS indirection QP within mlx4_qp_context.flags */
	MLX4_RSS_QPC_FLAG_OFFSET		= 13,
};

#define MLX4_EN_RSS_KEY_SIZE 40

struct mlx4_rss_context {
	__be32			base_qpn;
	__be32			default_qpn;
	u16			reserved;
	u8			hash_fn;
	u8			flags;
	__be32			rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
	__be32			base_qpn_udp;
};
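
/*
 * Illustrative sketch (not part of the original header): for an RSS
 * indirection QP, drivers typically overlay struct mlx4_rss_context on the
 * pri_path area of the QP context at MLX4_RSS_OFFSET_IN_QPC_PRI_PATH and
 * mark the QP as an RSS QP via MLX4_RSS_QPC_FLAG_OFFSET in the context
 * flags.  Roughly:
 *
 *	struct mlx4_qp_context context = {};
 *	struct mlx4_rss_context *rss_ctx =
 *		(void *)&context + offsetof(struct mlx4_qp_context, pri_path) +
 *		MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
 *
 *	context.flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET);
 *	rss_ctx->flags = MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV4;
 *	rss_ctx->hash_fn = MLX4_RSS_HASH_TOP;
 */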

struct mlx4_qp_path {
	u8			fl;
	union {
		u8			vlan_control;
		u8			control;
	};
	u8			disable_pkey_check;
	u8			pkey_index;
	u8			counter_index;
	u8			grh_mylmc;
	__be16			rlid;
	u8			ackto;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	u8			rgid[16];
	u8			sched_queue;
	u8			vlan_index;
	u8			feup;
	u8			fvl_rx;
	u8			reserved4[2];
	u8			dmac[ETH_ALEN];
};

enum { /* fl */
	MLX4_FL_CV			= 1 << 6,
	MLX4_FL_SV			= 1 << 5,
	MLX4_FL_ETH_HIDE_CQE_VLAN	= 1 << 2,
	MLX4_FL_ETH_SRC_CHECK_MC_LB	= 1 << 1,
	MLX4_FL_ETH_SRC_CHECK_UC_LB	= 1 << 0,
};

enum { /* control */
	MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER	= 1 << 7,
};

enum { /* vlan_control */
	MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED	= 1 << 6,
	MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */
	MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED	= 1 << 4,
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED	= 1 << 2,
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
	MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED	= 1 << 0
};

enum { /* feup */
	MLX4_FEUP_FORCE_ETH_UP		= 1 << 6, /* force Eth UP */
	MLX4_FSM_FORCE_ETH_SRC_MAC	= 1 << 5, /* force Source MAC */
	MLX4_FVL_FORCE_ETH_VLAN		= 1 << 3  /* force Eth vlan */
};

enum { /* fvl_rx */
	MLX4_FVL_RX_FORCE_ETH_VLAN	= 1 << 0 /* enforce Eth rx vlan */
};

struct mlx4_qp_context {
	__be32			flags;
	__be32			pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	u8			sq_size_stride;
	u8			rlkey_roce_mode;
	__be32			usr_page;
	__be32			local_qpn;
	__be32			remote_qpn;
	struct mlx4_qp_path	pri_path;
	struct mlx4_qp_path	alt_path;
	__be32			params1;
	u32			reserved1;
	__be32			next_send_psn;
	__be32			cqn_send;
	__be16			roce_entropy;
	__be16			reserved2[3];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;
	__be32			qkey;
	__be32			srqn;
	__be32			msn;
	__be16			rq_wqe_counter;
	__be16			sq_wqe_counter;
	u32			reserved3;
	__be16			rate_limit_params;
	u8			reserved4;
	u8			qos_vport;
	__be32			param3;
	__be32			nummmcpeers_basemkey;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[10];
};
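
/*
 * Illustrative sketch (not part of the original header): a minimal
 * reset-to-init transition, assuming the common layout where the QP state
 * sits in the top nibble of flags and the service type in bits 23:16.
 * pdn, uar_index, cqn, mtt and qp are placeholders for the caller's own
 * allocated resources.
 *
 *	struct mlx4_qp_context ctx = {};
 *
 *	ctx.flags     = cpu_to_be32((MLX4_QP_STATE_INIT << 28) |
 *				    (MLX4_QP_ST_UD << 16));
 *	ctx.pd        = cpu_to_be32(pdn);
 *	ctx.usr_page  = cpu_to_be32(uar_index);
 *	ctx.local_qpn = cpu_to_be32(qp->qpn);
 *	ctx.cqn_send  = cpu_to_be32(cqn);
 *	ctx.cqn_recv  = cpu_to_be32(cqn);
 *
 *	err = mlx4_qp_modify(dev, &mtt, MLX4_QP_STATE_RST,
 *			     MLX4_QP_STATE_INIT, &ctx, 0, 0, qp);
 */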

struct mlx4_update_qp_context {
	__be64			qp_mask;
	__be64			primary_addr_path_mask;
	__be64			secondary_addr_path_mask;
	u64			reserved1;
	struct mlx4_qp_context	qp_context;
	u64			reserved2[58];
};

enum {
	MLX4_UPD_QP_MASK_PM_STATE	= 32,
	MLX4_UPD_QP_MASK_VSD		= 33,
	MLX4_UPD_QP_MASK_QOS_VPP	= 34,
	MLX4_UPD_QP_MASK_RATE_LIMIT	= 35,
};

enum {
	MLX4_UPD_QP_PATH_MASK_PKEY_INDEX		= 0 + 32,
	MLX4_UPD_QP_PATH_MASK_FSM			= 1 + 32,
	MLX4_UPD_QP_PATH_MASK_MAC_INDEX			= 2 + 32,
	MLX4_UPD_QP_PATH_MASK_FVL			= 3 + 32,
	MLX4_UPD_QP_PATH_MASK_CV			= 4 + 32,
	MLX4_UPD_QP_PATH_MASK_VLAN_INDEX		= 5 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN		= 6 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED	= 7 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P		= 8 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED	= 9 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED	= 10 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P		= 11 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED	= 12 + 32,
	MLX4_UPD_QP_PATH_MASK_FEUP			= 13 + 32,
	MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE		= 14 + 32,
	MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX		= 15 + 32,
	MLX4_UPD_QP_PATH_MASK_FVL_RX			= 16 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB	= 18 + 32,
	MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB	= 19 + 32,
	MLX4_UPD_QP_PATH_MASK_SV			= 22 + 32,
};

enum { /* param3 */
	MLX4_STRIP_VLAN = 1 << 30
};

/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
#define MLX4_FW_VER_WQE_CTRL_NEC	mlx4_fw_ver(2, 2, 232)

enum {
	MLX4_WQE_CTRL_NEC		= 1 << 29,
	MLX4_WQE_CTRL_IIP		= 1 << 28,
	MLX4_WQE_CTRL_ILP		= 1 << 27,
	MLX4_WQE_CTRL_FENCE		= 1 << 6,
	MLX4_WQE_CTRL_CQ_UPDATE		= 3 << 2,
	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
	MLX4_WQE_CTRL_INS_CVLAN		= 1 << 6,
	MLX4_WQE_CTRL_INS_SVLAN		= 1 << 7,
	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
	MLX4_WQE_CTRL_FORCE_LOOPBACK	= 1 << 0,
};

union mlx4_wqe_qpn_vlan {
	struct {
		__be16	vlan_tag;
		u8	ins_vlan;
		u8	fence_size;
	};
	__be32		bf_qpn;
};

struct mlx4_wqe_ctrl_seg {
	__be32			owner_opcode;
	union mlx4_wqe_qpn_vlan	qpn_vlan;
	/*
	 * High 24 bits are SRC remote buffer; low 8 bits are flags:
	 * [7]   SO (strong ordering)
	 * [5]   TCP/UDP checksum
	 * [4]   IP checksum
	 * [3:2] C (generate completion queue entry)
	 * [1]   SE (solicited event)
	 * [0]   FL (force loopback)
	 */
	union {
		__be32		srcrb_flags;
		__be16		srcrb_flags16[2];
	};
	/*
	 * imm is immediate data for send/RDMA write w/ immediate;
	 * also invalidation key for send with invalidate; input
	 * modifier for WQEs on CCQs.
	 */
	__be32			imm;
};
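
/*
 * Illustrative sketch (not part of the original header): requesting a
 * completion entry plus IP and TCP/UDP checksum offload on a send WQE
 * means OR-ing the corresponding flag bits into srcrb_flags, e.g.:
 *
 *	ctrl->srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 *					MLX4_WQE_CTRL_IP_CSUM |
 *					MLX4_WQE_CTRL_TCP_UDP_CSUM);
 */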

enum {
	MLX4_WQE_MLX_VL15	= 1 << 17,
	MLX4_WQE_MLX_SLR	= 1 << 16
};

struct mlx4_wqe_mlx_seg {
	u8			owner;
	u8			reserved1[2];
	u8			opcode;
	__be16			sched_prio;
	u8			reserved2;
	u8			size;
	/*
	 * [17]    VL15
	 * [16]    SLR
	 * [15:12] static rate
	 * [11:8]  SL
	 * [4]     ICRC
	 * [3:2]   C
	 * [0]     FL (force loopback)
	 */
	__be32			flags;
	__be16			rlid;
	u16			reserved3;
};

struct mlx4_wqe_datagram_seg {
	__be32			av[8];
	__be32			dqpn;
	__be32			qkey;
	__be16			vlan;
	u8			mac[ETH_ALEN];
};

struct mlx4_wqe_lso_seg {
	__be32			mss_hdr_size;
	__be32			header[];
};
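
/*
 * Illustrative sketch (not part of the original header): the LSO segment
 * is commonly built by packing the MSS into the upper 16 bits of
 * mss_hdr_size and the header length into the lower 16 bits, with the
 * packet headers copied inline right after it, e.g.:
 *
 *	lso->mss_hdr_size = cpu_to_be32((mss << 16) | header_len);
 *	memcpy(lso->header, headers, header_len);
 */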

enum mlx4_wqe_bind_seg_flags2 {
	MLX4_WQE_BIND_ZERO_BASED	= (1 << 30),
	MLX4_WQE_BIND_TYPE_2		= (1 << 31),
};

struct mlx4_wqe_bind_seg {
	__be32			flags1;
	__be32			flags2;
	__be32			new_rkey;
	__be32			lkey;
	__be64			addr;
	__be64			length;
};

enum {
	MLX4_WQE_FMR_PERM_LOCAL_READ		= 1 << 27,
	MLX4_WQE_FMR_PERM_LOCAL_WRITE		= 1 << 28,
	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ	= 1 << 29,
	MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE	= 1 << 30,
	MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC	= 1 << 31
};

struct mlx4_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

struct mlx4_wqe_fmr_ext_seg {
	u8			flags;
	u8			reserved;
	__be16			app_mask;
	__be16			wire_app_tag;
	__be16			mem_app_tag;
	__be32			wire_ref_tag_base;
	__be32			mem_ref_tag_base;
};

struct mlx4_wqe_local_inval_seg {
	u64			reserved1;
	__be32			mem_key;
	u32			reserved2;
	u64			reserved3[2];
};

struct mlx4_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

struct mlx4_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

struct mlx4_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};

struct mlx4_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};

enum {
	MLX4_INLINE_ALIGN	= 64,
	MLX4_INLINE_SEG		= 1 << 31,
};

struct mlx4_wqe_inline_seg {
	__be32			byte_count;
};
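
/*
 * Illustrative sketch (not part of the original header): a gather entry in
 * a WQE is either a scatter/gather pointer (mlx4_wqe_data_seg) or inline
 * data whose byte count carries the MLX4_INLINE_SEG marker bit, e.g.:
 *
 *	dseg->byte_count = cpu_to_be32(len);
 *	dseg->lkey       = cpu_to_be32(lkey);
 *	dseg->addr       = cpu_to_be64(dma_addr);
 *
 *	inl->byte_count  = cpu_to_be32(MLX4_INLINE_SEG | inline_len);
 *	memcpy(inl + 1, data, inline_len);
 */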

enum mlx4_update_qp_attr {
	MLX4_UPDATE_QP_SMAC			= 1 << 0,
	MLX4_UPDATE_QP_VSD			= 1 << 1,
	MLX4_UPDATE_QP_RATE_LIMIT		= 1 << 2,
	MLX4_UPDATE_QP_QOS_VPORT		= 1 << 3,
	MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB	= 1 << 4,
	MLX4_UPDATE_QP_SUPPORTED_ATTRS		= (1 << 5) - 1
};

enum mlx4_update_qp_params_flags {
	MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB	= 1 << 0,
	MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE		= 1 << 1,
};

struct mlx4_update_qp_params {
	u8	smac_index;
	u8	qos_vport;
	u32	flags;
	u16	rate_unit;
	u16	rate_val;
};
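
/*
 * Illustrative sketch (not part of the original header): updating only the
 * source MAC index of an active QP might look like (new_smac_index is a
 * placeholder):
 *
 *	struct mlx4_update_qp_params params = {
 *		.smac_index = new_smac_index,
 *	};
 *
 *	err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_SMAC, &params);
 */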

struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params);
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp);
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context);
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
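
/*
 * Descriptive note (not in the original header): __mlx4_qp_lookup() masks
 * the QP number down to the device's QP table size (the masking assumes
 * num_qps is a power of two) and uses it as the key into the per-device
 * radix tree of active QPs.
 */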
static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
}

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
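
/*
 * Descriptive note (not in the original header): folded_qp() folds a
 * 24-bit QP number into 16 bits by XOR-ing the top byte into the bottom
 * byte while keeping the middle byte; e.g. q = 0xABCDEF folds to
 * (0xEF ^ 0xAB) | 0xCD00 = 0xCD44.
 */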
static inline u16 folded_qp(u32 q)
{
	u16 res;

	res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00);
	return res;
}

u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);

#endif /* MLX4_QP_H */