ib_verbs.h
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h,v 1.1.1.1 2007/06/12 07:27:15 eyryu Exp $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>

#include <asm/atomic.h>
#include <asm/scatterlist.h>
#include <asm/uaccess.h>

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17)
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
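
/*
 * Example (illustrative sketch, not part of the original header):
 * registering for asynchronous events; the handler name and the
 * "device" pointer are hypothetical, and ib_register_event_handler()
 * is declared further below in this file.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "port %d is active\n",
 *			       event->element.port_num);
 *	}
 *
 *	struct ib_event_handler eh;
 *
 *	INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *	ib_register_event_handler(&eh);
 */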

struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
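
/*
 * Example (illustrative, not part of the original header): the two
 * helpers are inverses over the rates defined above, e.g.
 *
 *	ib_rate_to_mult(IB_RATE_20_GBPS) == 8;	since 8 * 2.5 = 20 Gbit/sec
 *	mult_to_ib_rate(4) == IB_RATE_10_GBPS;	since 4 * 2.5 = 10 Gbit/sec
 */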

struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV		= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH	= 1,
	IB_WC_WITH_IMM	= (1<<1)
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	__be32			imm_data;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify {
	IB_CQ_SOLICITED,
	IB_CQ_NEXT_COMP
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num;	/* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index;	/* valid for GSI only */
			u8	port_num;	/* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_umem {
	unsigned long		user_base;
	unsigned long		virt_base;
	size_t			length;
	int			offset;
	int			page_size;
	int			writable;
	struct list_head	chunk_list;
};

struct ib_umem_chunk {
	struct list_head	list;
	int			nents;
	int			nmap;
	struct scatterlist	page_list[0];
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t	inlen;
	size_t	outlen;
};

#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

struct ib_umem_object {
	struct ib_uobject	uobject;
	struct ib_umem		umem;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;		/* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt;		/* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	u32			lkey;
	u32			rkey;
	atomic_t		usecnt;		/* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8                     *lmc_cache;
};

struct ib_dma_mapping_ops {
	int	(*mapping_error)(struct ib_device *dev, u64 dma_addr);
	u64	(*map_single)(struct ib_device *dev, void *ptr, size_t size,
			      enum dma_data_direction direction);
	void	(*unmap_single)(struct ib_device *dev, u64 addr, size_t size,
				enum dma_data_direction direction);
	u64	(*map_page)(struct ib_device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction direction);
	void	(*unmap_page)(struct ib_device *dev, u64 addr, size_t size,
			      enum dma_data_direction direction);
	int	(*map_sg)(struct ib_device *dev, struct scatterlist *sg,
			  int nents, enum dma_data_direction direction);
	void	(*unmap_sg)(struct ib_device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction direction);
	u64	(*dma_address)(struct ib_device *dev, struct scatterlist *sg);
	unsigned int (*dma_len)(struct ib_device *dev, struct scatterlist *sg);
	void	(*sync_single_for_cpu)(struct ib_device *dev, u64 dma_handle,
				       size_t size,
				       enum dma_data_direction dir);
	void	(*sync_single_for_device)(struct ib_device *dev,
					  u64 dma_handle, size_t size,
					  enum dma_data_direction dir);
	void   *(*alloc_coherent)(struct ib_device *dev, size_t size,
				  u64 *dma_handle, gfp_t flag);
	void	(*free_coherent)(struct ib_device *dev, size_t size,
				 void *cpu_addr, u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device	       *dma_device;

	char			name[IB_DEVICE_NAME_MAX];

	struct list_head	event_handler_list;
	spinlock_t		event_handler_lock;

	struct list_head	core_list;
	struct list_head	client_data_list;
	spinlock_t		client_data_lock;

	struct ib_cache		cache;

	u32			flags;

	struct iw_cm_verbs     *iwcm;

	int	(*query_device)(struct ib_device *device,
				struct ib_device_attr *device_attr);
	int	(*query_port)(struct ib_device *device, u8 port_num,
			      struct ib_port_attr *port_attr);
	int	(*query_gid)(struct ib_device *device, u8 port_num,
			     int index, union ib_gid *gid);
	int	(*query_pkey)(struct ib_device *device, u8 port_num,
			      u16 index, u16 *pkey);
	int	(*modify_device)(struct ib_device *device,
				 int device_modify_mask,
				 struct ib_device_modify *device_modify);
	int	(*modify_port)(struct ib_device *device, u8 port_num,
			       int port_modify_mask,
			       struct ib_port_modify *port_modify);
	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
					      struct ib_udata *udata);
	int	(*dealloc_ucontext)(struct ib_ucontext *context);
	int	(*mmap)(struct ib_ucontext *context,
			struct vm_area_struct *vma);
	struct ib_pd *(*alloc_pd)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_udata *udata);
	int	(*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *(*create_ah)(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr);
	int	(*modify_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
	int	(*query_ah)(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
	int	(*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *(*create_srq)(struct ib_pd *pd,
				     struct ib_srq_init_attr *srq_init_attr,
				     struct ib_udata *udata);
	int	(*modify_srq)(struct ib_srq *srq,
			      struct ib_srq_attr *srq_attr,
			      enum ib_srq_attr_mask srq_attr_mask,
			      struct ib_udata *udata);
	int	(*query_srq)(struct ib_srq *srq,
			     struct ib_srq_attr *srq_attr);
	int	(*destroy_srq)(struct ib_srq *srq);
	int	(*post_srq_recv)(struct ib_srq *srq,
				 struct ib_recv_wr *recv_wr,
				 struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	int	(*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			     int qp_attr_mask, struct ib_udata *udata);
	int	(*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			    int qp_attr_mask,
			    struct ib_qp_init_attr *qp_init_attr);
	int	(*destroy_qp)(struct ib_qp *qp);
	int	(*post_send)(struct ib_qp *qp, struct ib_send_wr *send_wr,
			     struct ib_send_wr **bad_send_wr);
	int	(*post_recv)(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
			     struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
	int	(*destroy_cq)(struct ib_cq *cq);
	int	(*resize_cq)(struct ib_cq *cq, int cqe,
			     struct ib_udata *udata);
	int	(*poll_cq)(struct ib_cq *cq, int num_entries,
			   struct ib_wc *wc);
	int	(*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int	(*req_notify_cq)(struct ib_cq *cq,
				 enum ib_cq_notify cq_notify);
	int	(*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
	struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
				     struct ib_phys_buf *phys_buf_array,
				     int num_phys_buf, int mr_access_flags,
				     u64 *iova_start);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
				     struct ib_umem *region,
				     int mr_access_flags,
				     struct ib_udata *udata);
	int	(*query_mr)(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
	int	(*dereg_mr)(struct ib_mr *mr);
	int	(*rereg_phys_mr)(struct ib_mr *mr, int mr_rereg_mask,
				 struct ib_pd *pd,
				 struct ib_phys_buf *phys_buf_array,
				 int num_phys_buf, int mr_access_flags,
				 u64 *iova_start);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd);
	int	(*bind_mw)(struct ib_qp *qp, struct ib_mw *mw,
			   struct ib_mw_bind *mw_bind);
	int	(*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
				    struct ib_fmr_attr *fmr_attr);
	int	(*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list,
				int list_len, u64 iova);
	int	(*unmap_fmr)(struct list_head *fmr_list);
	int	(*dealloc_fmr)(struct ib_fmr *fmr);
	int	(*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int	(*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
	int	(*process_mad)(struct ib_device *device,
			       int process_mad_flags, u8 port_num,
			       struct ib_wc *in_wc, struct ib_grh *in_grh,
			       struct ib_mad *in_mad, struct ib_mad *out_mad);

	struct ib_dma_mapping_ops *dma_ops;

	struct module	       *owner;
	struct class_device	class_dev;
	struct kobject		ports_parent;
	struct list_head	port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	u64			uverbs_cmd_mask;
	int			uverbs_abi_ver;

	char			node_desc[64];
	__be64			node_guid;
	u8			node_type;
	u8			phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}
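
/*
 * Example (illustrative sketch): a low-level driver verb can use these
 * helpers to exchange ABI structures with userspace; the response type
 * and its field are hypothetical driver-private definitions.
 *
 *	struct my_create_cq_resp resp;
 *
 *	resp.cqn = my_cq->cqn;
 *	if (ib_copy_to_udata(udata, &resp, sizeof resp))
 *		return ERR_PTR(-EFAULT);
 */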

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
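
/*
 * Example (illustrative sketch): typical use at the top of a driver's
 * modify_qp method; "qp->state" stands in for wherever the driver
 * tracks the current state.
 *
 *	cur_state = qp_attr_mask & IB_QP_CUR_STATE ?
 *		qp_attr->cur_qp_state : qp->state;
 *	new_state = qp_attr_mask & IB_QP_STATE ?
 *		qp_attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				qp_attr_mask))
 *		return -EINVAL;
 */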

int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);
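
/*
 * Example (illustrative): typical consumer setup and teardown.
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	... create QPs, CQs, MRs under this PD ...
 *	ib_dealloc_pd(pd);
 */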

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 * work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 * sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);
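
/*
 * Example (illustrative): building a reply AH for a received UD
 * message; "wc" is the receive completion and "grh" points at the
 * start of the receive buffer, which holds the GRH when IB_WC_GRH is
 * set in wc.wc_flags.
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... use ah in the wr.ud member of a send work request ...
 *	ib_destroy_ah(ah);
 */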

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 * handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 * handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ. If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 * specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP. If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
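
/*
 * Example (illustrative sketch): creating an RC QP; the capability
 * values are arbitrary, and send_cq/recv_cq come from ib_create_cq().
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap	     = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */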

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 * transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
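
/*
 * Example (illustrative): moving a freshly created QP from RESET to
 * INIT; the mask must carry exactly the attributes the transition
 * requires (see ib_modify_qp_is_ok() above).
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */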

/**
 * ib_query_qp - Returns the attribute list and current values for the
 * specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
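
/*
 * Example (illustrative sketch): posting one signaled SEND of a single
 * registered buffer; dma_addr, length and mr are assumed to come from
 * an earlier mapping/registration step.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= length,
 *		.lkey	= mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id	    = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */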

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 * the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq:the CQ being polled
 * @num_entries:maximum number of completions to return
 * @wc:array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
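
/*
 * Example (illustrative): draining a CQ from a completion handler and
 * rearming notification; handle_recv()/handle_send() are hypothetical
 * helpers, and the (opcode & IB_WC_RECV) test is the one described at
 * the ib_wc_opcode definition above.
 *
 *	struct ib_wc wc;
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			continue;
 *		if (wc.opcode & IB_WC_RECV)
 *			handle_recv(&wc);
 *		else
 *			handle_send(&wc);
 *	}
 */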

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 * on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
 *   occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
 *   notification will occur on the next completion.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify cq_notify)
{
	return cq->device->req_notify_cq(cq, cq_notify);
}

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 * at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 * usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
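
/*
 * Example (illustrative): mapping a kernel buffer for a send and
 * unmapping it after the corresponding completion has been reaped.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	... post the send and wait for its completion ...
 *	ib_dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
 */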
  1352. /**
  1353. * ib_dma_map_page - Map a physical page to DMA address
  1354. * @dev: The device for which the dma_addr is to be created
  1355. * @page: The page to be mapped
  1356. * @offset: The offset within the page
  1357. * @size: The size of the region in bytes
  1358. * @direction: The direction of the DMA
  1359. */
  1360. static inline u64 ib_dma_map_page(struct ib_device *dev,
  1361. struct page *page,
  1362. unsigned long offset,
  1363. size_t size,
  1364. enum dma_data_direction direction)
  1365. {
  1366. if (dev->dma_ops)
  1367. return dev->dma_ops->map_page(dev, page, offset, size, direction);
  1368. return dma_map_page(dev->dma_device, page, offset, size, direction);
  1369. }
  1370. /**
  1371. * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
  1372. * @dev: The device for which the DMA address was created
  1373. * @addr: The DMA address
  1374. * @size: The size of the region in bytes
  1375. * @direction: The direction of the DMA
  1376. */
  1377. static inline void ib_dma_unmap_page(struct ib_device *dev,
  1378. u64 addr, size_t size,
  1379. enum dma_data_direction direction)
  1380. {
  1381. if (dev->dma_ops)
  1382. dev->dma_ops->unmap_page(dev, addr, size, direction);
  1383. else
  1384. dma_unmap_page(dev->dma_device, addr, size, direction);
  1385. }
  1386. /**
  1387. * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
  1388. * @dev: The device for which the DMA addresses are to be created
  1389. * @sg: The array of scatter/gather entries
  1390. * @nents: The number of scatter/gather entries
  1391. * @direction: The direction of the DMA
  1392. */
  1393. static inline int ib_dma_map_sg(struct ib_device *dev,
  1394. struct scatterlist *sg, int nents,
  1395. enum dma_data_direction direction)
  1396. {
  1397. if (dev->dma_ops)
  1398. return dev->dma_ops->map_sg(dev, sg, nents, direction);
  1399. return dma_map_sg(dev->dma_device, sg, nents, direction);
  1400. }
  1401. /**
  1402. * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
  1403. * @dev: The device for which the DMA addresses were created
  1404. * @sg: The array of scatter/gather entries
  1405. * @nents: The number of scatter/gather entries
  1406. * @direction: The direction of the DMA
  1407. */
  1408. static inline void ib_dma_unmap_sg(struct ib_device *dev,
  1409. struct scatterlist *sg, int nents,
  1410. enum dma_data_direction direction)
  1411. {
  1412. if (dev->dma_ops)
  1413. dev->dma_ops->unmap_sg(dev, sg, nents, direction);
  1414. else
  1415. dma_unmap_sg(dev->dma_device, sg, nents, direction);
  1416. }
  1417. /**
  1418. * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
  1419. * @dev: The device for which the DMA addresses were created
  1420. * @sg: The scatter/gather entry
  1421. */
  1422. static inline u64 ib_sg_dma_address(struct ib_device *dev,
  1423. struct scatterlist *sg)
  1424. {
  1425. if (dev->dma_ops)
  1426. return dev->dma_ops->dma_address(dev, sg);
  1427. return sg_dma_address(sg);
  1428. }

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
                                         struct scatterlist *sg)
{
        if (dev->dma_ops)
                return dev->dma_ops->dma_len(dev, sg);
        return sg_dma_len(sg);
}
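
/*
 * Usage sketch (illustrative only): mapping a scatter/gather table and
 * walking the resulting DMA addresses with the two accessors above.
 * "sg" and "nents" are assumed to describe an initialized scatterlist:
 *
 *        int i, mapped = ib_dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 *        if (!mapped)
 *                return -ENOMEM;
 *        for (i = 0; i < mapped; i++) {
 *                u64          addr = ib_sg_dma_address(dev, &sg[i]);
 *                unsigned int len  = ib_sg_dma_len(dev, &sg[i]);
 *                ... build one gather entry from addr/len ...
 *        }
 *        ib_dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 * Note that ib_dma_unmap_sg() takes the original nents, not the count
 * returned by ib_dma_map_sg().
 */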

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
                                              u64 addr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
        else
                dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
                                                 u64 addr,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        if (dev->dma_ops)
                dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
        else
                dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
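
/*
 * Usage sketch (illustrative only): handing a streaming mapping back and
 * forth between CPU and device, e.g. to inspect a receive buffer that
 * stays mapped across uses:
 *
 *        ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 *        ... the CPU may now read the buffer ...
 *        ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
 *
 * The CPU must not touch the buffer between the _for_device call and the
 * next _for_cpu call.
 */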

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
                                          size_t size,
                                          u64 *dma_handle,
                                          gfp_t flag)
{
        if (dev->dma_ops)
                return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
        else {
                dma_addr_t handle;
                void *ret;

                ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
                *dma_handle = handle;
                return ret;
        }
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
                                        size_t size, void *cpu_addr,
                                        u64 dma_handle)
{
        if (dev->dma_ops)
                dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
        else
                dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
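
/*
 * Usage sketch (illustrative only): a coherent allocation paired with its
 * release; the u64 handle is what gets programmed into the device:
 *
 *        u64   dma_handle;
 *        void *buf;
 *
 *        buf = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *        if (!buf)
 *                return -ENOMEM;
 *        ...
 *        ib_dma_free_coherent(dev, size, buf, dma_handle);
 */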

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start);
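
/*
 * Usage sketch (illustrative only): registering a single physically
 * contiguous buffer, assuming the addr/size layout of struct ib_phys_buf
 * defined earlier in this header:
 *
 *        struct ib_phys_buf buf = { .addr = phys_addr, .size = len };
 *        u64 iova = phys_addr;
 *        struct ib_mr *mr;
 *
 *        mr = ib_reg_phys_mr(pd, &buf, 1,
 *                            IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *                            &iova);
 *        if (IS_ERR(mr))
 *                return PTR_ERR(mr);
 */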

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 * Conceptually, this call deregisters the memory region and then registers
 * a physical memory region in its place. Where possible, resources are
 * reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region;
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation; otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array; otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights; otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start);
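
/*
 * Usage sketch (illustrative only): changing only the access rights of an
 * existing region. Parameters not selected by mr_rereg_mask are ignored,
 * so they can be passed as NULL/0 here:
 *
 *        ret = ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS, NULL, NULL, 0,
 *                               IB_ACCESS_LOCAL_WRITE, NULL);
 */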

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
                             struct ib_mw *mw,
                             struct ib_mw_bind *mw_bind)
{
        /* XXX reference counting in corresponding MR? */
        return mw->device->bind_mw ?
                mw->device->bind_mw(qp, mw, mw_bind) :
                -ENOSYS;
}
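
/*
 * Usage sketch (illustrative only): binding a window over part of an
 * existing MR. The field names assume the struct ib_mw_bind layout of
 * this header generation; adjust to your tree if it differs:
 *
 *        struct ib_mw_bind bind = {
 *                .wr_id           = my_wr_id,
 *                .mr              = mr,
 *                .addr            = start_addr,
 *                .length          = window_len,
 *                .mw_access_flags = IB_ACCESS_REMOTE_READ,
 *        };
 *
 *        ret = ib_bind_mw(qp, mw, &bind);
 */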

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
                                  u64 *page_list, int list_len,
                                  u64 iova)
{
        return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
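
/*
 * Usage sketch (illustrative only): the basic FMR lifecycle. page_list
 * holds page-aligned physical addresses, and ib_unmap_fmr() takes a list
 * so several FMRs can be invalidated in one call (this assumes the
 * list/lkey members of struct ib_fmr defined earlier in this header):
 *
 *        struct ib_fmr *fmr;
 *        LIST_HEAD(fmr_list);
 *
 *        fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &fmr_attr);
 *        if (IS_ERR(fmr))
 *                return PTR_ERR(fmr);
 *        ret = ib_map_phys_fmr(fmr, page_list, npages, iova);
 *        ... use fmr->lkey in work requests ...
 *        list_add_tail(&fmr->list, &fmr_list);
 *        ib_unmap_fmr(&fmr_list);
 *        ib_dealloc_fmr(fmr);
 */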

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
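
/*
 * Usage sketch (illustrative only): joining and leaving a multicast group
 * on a UD QP, given a group GID and LID obtained from the subnet
 * administrator:
 *
 *        ret = ib_attach_mcast(qp, &mgid, mlid);
 *        ... send/receive multicast traffic on qp ...
 *        ret = ib_detach_mcast(qp, &mgid, mlid);
 */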

#endif /* IB_VERBS_H */