
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. *
  4. * Copyright (c) 2011, Microsoft Corporation.
  5. *
  6. * Authors:
  7. * Haiyang Zhang <haiyangz@microsoft.com>
  8. * Hank Janssen <hjanssen@microsoft.com>
  9. * K. Y. Srinivasan <kys@microsoft.com>
  10. */
  11. #ifndef _HYPERV_H
  12. #define _HYPERV_H
  13. #include <uapi/linux/hyperv.h>
  14. #include <linux/mm.h>
  15. #include <linux/types.h>
  16. #include <linux/scatterlist.h>
  17. #include <linux/list.h>
  18. #include <linux/timer.h>
  19. #include <linux/completion.h>
  20. #include <linux/device.h>
  21. #include <linux/mod_devicetable.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/reciprocal_div.h>
  24. #include <asm/hyperv-tlfs.h>
  25. #define MAX_PAGE_BUFFER_COUNT 32
  26. #define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
  27. #pragma pack(push, 1)
  28. /*
   29. * Types of GPADL, which decide how the GPADL header is created.
  30. *
  31. * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
  32. * same as HV_HYP_PAGE_SIZE.
  33. *
  34. * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the headers of ring buffers
  35. * will be of PAGE_SIZE, however, only the first HV_HYP_PAGE will be put
  36. * into gpadl, therefore the number for HV_HYP_PAGE and the indexes of each
  37. * HV_HYP_PAGE will be different between different types of GPADL, for example
  38. * if PAGE_SIZE is 64K:
  39. *
   40. * BUFFER:
   41. *
   42. * gva:    |--       64k      --|--       64k      --| ... |
   43. * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
   44. * index:  0    1    2     15   16   17   18 .. 31   32 ...
   45. *         |    |    ...   |    |    |    ...   |    ...
   46. *         v    V    V     V    V    V          V
   47. * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
   48. * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
   49. *
   50. * RING:
   51. *
   52. *         | header |      data      | header |     data     |
   53. * gva:    |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
   54. * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
   55. * index:  0    1    16   17   18    31   ...   n   n+1   n+16 ...  2n
   56. *         |         /    /          /          |         /         /
   57. *         |        /    /          /           |        /         /
   58. *         |       /    /    ...   /    ...     |       /   ...   /
   59. *         |      /    /          /             |      /         /
   60. *         |     /    /          /              |     /         /
   61. *         V    V    V          V               V    V         v
   62. * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |   ...   |
   63. * index:  0    1    2   ...    16   ...       n-15 n-14 n-13 ... 2n-30
  64. */
  65. enum hv_gpadl_type {
  66. HV_GPADL_BUFFER,
  67. HV_GPADL_RING
  68. };
  69. /* Single-page buffer */
  70. struct hv_page_buffer {
  71. u32 len;
  72. u32 offset;
  73. u64 pfn;
  74. };
  75. /* Multiple-page buffer */
  76. struct hv_multipage_buffer {
   77. /* Length and Offset determine the number of PFNs in the array */
  78. u32 len;
  79. u32 offset;
  80. u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
  81. };
  82. /*
  83. * Multiple-page buffer array; the pfn array is variable size:
  84. * The number of entries in the PFN array is determined by
  85. * "len" and "offset".
  86. */
  87. struct hv_mpb_array {
   89. /* Length and Offset determine the number of PFNs in the array */
  89. u32 len;
  90. u32 offset;
  91. u64 pfn_array[];
  92. };
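/*
 * Illustrative note (not from the original header): the number of PFN
 * entries implied by "len" and "offset" is the number of HV_HYP_PAGE-sized
 * pages spanned by the buffer, assuming "offset" is an offset within the
 * first page. For example, with HV_HYP_PAGE_SIZE == 4096, offset == 0x100
 * and len == 0x2100 span three pages, so pfn_array[] is expected to hold
 * 3 entries:
 *
 *   nr_pfns = HVPFN_UP(offset + len) == HVPFN_UP(0x2200) == 3
 *
 * (HVPFN_UP() is defined near the end of this header.)
 */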
  93. /* 0x18 includes the proprietary packet header */
  94. #define MAX_PAGE_BUFFER_PACKET (0x18 + \
  95. (sizeof(struct hv_page_buffer) * \
  96. MAX_PAGE_BUFFER_COUNT))
  97. #define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
  98. sizeof(struct hv_multipage_buffer))
  99. #pragma pack(pop)
  100. struct hv_ring_buffer {
  101. /* Offset in bytes from the start of ring data below */
  102. u32 write_index;
  103. /* Offset in bytes from the start of ring data below */
  104. u32 read_index;
  105. u32 interrupt_mask;
  106. /*
  107. * WS2012/Win8 and later versions of Hyper-V implement interrupt
  108. * driven flow management. The feature bit feat_pending_send_sz
  109. * is set by the host on the host->guest ring buffer, and by the
  110. * guest on the guest->host ring buffer.
  111. *
  112. * The meaning of the feature bit is a bit complex in that it has
  113. * semantics that apply to both ring buffers. If the guest sets
  114. * the feature bit in the guest->host ring buffer, the guest is
  115. * telling the host that:
  116. * 1) It will set the pending_send_sz field in the guest->host ring
  117. * buffer when it is waiting for space to become available, and
  118. * 2) It will read the pending_send_sz field in the host->guest
  119. * ring buffer and interrupt the host when it frees enough space
  120. *
  121. * Similarly, if the host sets the feature bit in the host->guest
  122. * ring buffer, the host is telling the guest that:
  123. * 1) It will set the pending_send_sz field in the host->guest ring
  124. * buffer when it is waiting for space to become available, and
  125. * 2) It will read the pending_send_sz field in the guest->host
  126. * ring buffer and interrupt the guest when it frees enough space
  127. *
  128. * If either the guest or host does not set the feature bit that it
  129. * owns, that guest or host must do polling if it encounters a full
  130. * ring buffer, and not signal the other end with an interrupt.
  131. */
  132. u32 pending_send_sz;
  133. u32 reserved1[12];
  134. union {
  135. struct {
  136. u32 feat_pending_send_sz:1;
  137. };
  138. u32 value;
  139. } feature_bits;
  140. /* Pad it to PAGE_SIZE so that data starts on page boundary */
  141. u8 reserved2[PAGE_SIZE - 68];
  142. /*
  143. * Ring data starts here + RingDataStartOffset
  144. * !!! DO NOT place any fields below this !!!
  145. */
  146. u8 buffer[];
  147. } __packed;
   148. /* Calculate the proper size of a ring buffer; it must be page-aligned */
  149. #define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
  150. (payload_sz))
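/*
 * Illustrative example (not from the original header): with 4 KiB pages,
 * sizeof(struct hv_ring_buffer) is exactly PAGE_SIZE (the header above is
 * padded out to a page), so a 16 KiB payload yields:
 *
 *   VMBUS_RING_SIZE(16 * 1024)
 *     == PAGE_ALIGN(4096 + 16384)
 *     == 20480 bytes (5 pages: 1 header page + 4 data pages)
 */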
  151. struct hv_ring_buffer_info {
  152. struct hv_ring_buffer *ring_buffer;
  153. u32 ring_size; /* Include the shared header */
  154. struct reciprocal_value ring_size_div10_reciprocal;
  155. spinlock_t ring_lock;
  156. u32 ring_datasize; /* < ring_size */
  157. u32 priv_read_index;
  158. /*
  159. * The ring buffer mutex lock. This lock prevents the ring buffer from
  160. * being freed while the ring buffer is being accessed.
  161. */
  162. struct mutex ring_buffer_mutex;
  163. };
  164. static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
  165. {
  166. u32 read_loc, write_loc, dsize, read;
  167. dsize = rbi->ring_datasize;
  168. read_loc = rbi->ring_buffer->read_index;
  169. write_loc = READ_ONCE(rbi->ring_buffer->write_index);
  170. read = write_loc >= read_loc ? (write_loc - read_loc) :
  171. (dsize - read_loc) + write_loc;
  172. return read;
  173. }
  174. static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
  175. {
  176. u32 read_loc, write_loc, dsize, write;
  177. dsize = rbi->ring_datasize;
  178. read_loc = READ_ONCE(rbi->ring_buffer->read_index);
  179. write_loc = rbi->ring_buffer->write_index;
  180. write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
  181. read_loc - write_loc;
  182. return write;
  183. }
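/*
 * Illustrative example (not from the original header): with
 * ring_datasize == 4096, read_index == 3000 and write_index == 1000,
 * the unread data wraps around the end of the ring, so:
 *
 *   bytes to read  == (4096 - 3000) + 1000 == 2096
 *   bytes to write == 3000 - 1000          == 2000
 *
 * Note that write_index == read_index means "empty" here; writers are
 * expected never to fill the ring completely, so a full ring cannot be
 * confused with an empty one.
 */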
  184. static inline u32 hv_get_avail_to_write_percent(
  185. const struct hv_ring_buffer_info *rbi)
  186. {
  187. u32 avail_write = hv_get_bytes_to_write(rbi);
  188. return reciprocal_divide(
  189. (avail_write << 3) + (avail_write << 1),
  190. rbi->ring_size_div10_reciprocal);
  191. }
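/*
 * Illustrative note (not from the original header): the shift/add above is
 * just "avail_write * 10" ((x << 3) + (x << 1) == 8x + 2x), and dividing by
 * the precomputed reciprocal of ring_size / 10 yields an integer percentage:
 *
 *   avail_write * 10 / (ring_size / 10)  ~=  avail_write * 100 / ring_size
 *
 * e.g. avail_write == 6144 with ring_size == 20480 gives ~30 (percent free).
 */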
  192. /*
   193. * The VMBus version is a 32-bit entity broken up into
   194. * two 16-bit quantities: major_number.minor_number.
  195. *
  196. * 0 . 13 (Windows Server 2008)
  197. * 1 . 1 (Windows 7)
  198. * 2 . 4 (Windows 8)
   199. * 3 . 0 (Windows 8.1)
  200. * 4 . 0 (Windows 10)
  201. * 4 . 1 (Windows 10 RS3)
  202. * 5 . 0 (Newer Windows 10)
  203. * 5 . 1 (Windows 10 RS4)
  204. * 5 . 2 (Windows Server 2019, RS5)
  205. */
  206. #define VERSION_WS2008 ((0 << 16) | (13))
  207. #define VERSION_WIN7 ((1 << 16) | (1))
  208. #define VERSION_WIN8 ((2 << 16) | (4))
  209. #define VERSION_WIN8_1 ((3 << 16) | (0))
  210. #define VERSION_WIN10 ((4 << 16) | (0))
  211. #define VERSION_WIN10_V4_1 ((4 << 16) | (1))
  212. #define VERSION_WIN10_V5 ((5 << 16) | (0))
  213. #define VERSION_WIN10_V5_1 ((5 << 16) | (1))
  214. #define VERSION_WIN10_V5_2 ((5 << 16) | (2))
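/*
 * Illustrative example (not from the original header): a negotiated version
 * splits back into its halves with plain shifts and masks, e.g. for
 * VERSION_WIN10_V5_2 ((5 << 16) | 2):
 *
 *   major = version >> 16;      == 5
 *   minor = version & 0xffff;   == 2
 *
 * (vmbus_proto_version, declared later in this header, holds the version
 * actually negotiated with the host.)
 */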
   215. /* Make the maximum size of a pipe payload 16K */
  216. #define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
  217. /* Define PipeMode values. */
  218. #define VMBUS_PIPE_TYPE_BYTE 0x00000000
  219. #define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
  220. /* The size of the user defined data buffer for non-pipe offers. */
  221. #define MAX_USER_DEFINED_BYTES 120
  222. /* The size of the user defined data buffer for pipe offers. */
  223. #define MAX_PIPE_USER_DEFINED_BYTES 116
  224. /*
  225. * At the center of the Channel Management library is the Channel Offer. This
  226. * struct contains the fundamental information about an offer.
  227. */
  228. struct vmbus_channel_offer {
  229. guid_t if_type;
  230. guid_t if_instance;
  231. /*
  232. * These two fields are not currently used.
  233. */
  234. u64 reserved1;
  235. u64 reserved2;
  236. u16 chn_flags;
   237. u16 mmio_megabytes; /* in megabytes (value * 1024 * 1024 bytes) */
  238. union {
  239. /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
  240. struct {
  241. unsigned char user_def[MAX_USER_DEFINED_BYTES];
  242. } std;
  243. /*
  244. * Pipes:
   245. * The following structure is an integrated pipe protocol, which
  246. * is implemented on top of standard user-defined data. Pipe
  247. * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
  248. * use.
  249. */
  250. struct {
  251. u32 pipe_mode;
  252. unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
  253. } pipe;
  254. } u;
  255. /*
  256. * The sub_channel_index is defined in Win8: a value of zero means a
  257. * primary channel and a value of non-zero means a sub-channel.
  258. *
  259. * Before Win8, the field is reserved, meaning it's always zero.
  260. */
  261. u16 sub_channel_index;
  262. u16 reserved3;
  263. } __packed;
  264. /* Server Flags */
  265. #define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
  266. #define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
  267. #define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
  268. #define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
  269. #define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
  270. #define VMBUS_CHANNEL_PARENT_OFFER 0x200
  271. #define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
  272. #define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
  273. struct vmpacket_descriptor {
  274. u16 type;
  275. u16 offset8;
  276. u16 len8;
  277. u16 flags;
  278. u64 trans_id;
  279. } __packed;
  280. struct vmpacket_header {
  281. u32 prev_pkt_start_offset;
  282. struct vmpacket_descriptor descriptor;
  283. } __packed;
  284. struct vmtransfer_page_range {
  285. u32 byte_count;
  286. u32 byte_offset;
  287. } __packed;
  288. struct vmtransfer_page_packet_header {
  289. struct vmpacket_descriptor d;
  290. u16 xfer_pageset_id;
  291. u8 sender_owns_set;
  292. u8 reserved;
  293. u32 range_cnt;
  294. struct vmtransfer_page_range ranges[1];
  295. } __packed;
  296. struct vmgpadl_packet_header {
  297. struct vmpacket_descriptor d;
  298. u32 gpadl;
  299. u32 reserved;
  300. } __packed;
  301. struct vmadd_remove_transfer_page_set {
  302. struct vmpacket_descriptor d;
  303. u32 gpadl;
  304. u16 xfer_pageset_id;
  305. u16 reserved;
  306. } __packed;
  307. /*
  308. * This structure defines a range in guest physical space that can be made to
  309. * look virtually contiguous.
  310. */
  311. struct gpa_range {
  312. u32 byte_count;
  313. u32 byte_offset;
  314. u64 pfn_array[];
  315. };
  316. /*
  317. * This is the format for an Establish Gpadl packet, which contains a handle by
  318. * which this GPADL will be known and a set of GPA ranges associated with it.
  319. * This can be converted to a MDL by the guest OS. If there are multiple GPA
  320. * ranges, then the resulting MDL will be "chained," representing multiple VA
  321. * ranges.
  322. */
  323. struct vmestablish_gpadl {
  324. struct vmpacket_descriptor d;
  325. u32 gpadl;
  326. u32 range_cnt;
  327. struct gpa_range range[1];
  328. } __packed;
  329. /*
  330. * This is the format for a Teardown Gpadl packet, which indicates that the
  331. * GPADL handle in the Establish Gpadl packet will never be referenced again.
  332. */
  333. struct vmteardown_gpadl {
  334. struct vmpacket_descriptor d;
  335. u32 gpadl;
   336. u32 reserved; /* for alignment to an 8-byte boundary */
  337. } __packed;
  338. /*
  339. * This is the format for a GPA-Direct packet, which contains a set of GPA
  340. * ranges, in addition to commands and/or data.
  341. */
  342. struct vmdata_gpa_direct {
  343. struct vmpacket_descriptor d;
  344. u32 reserved;
  345. u32 range_cnt;
  346. struct gpa_range range[1];
  347. } __packed;
   348. /* This is the format for an Additional Data Packet. */
  349. struct vmadditional_data {
  350. struct vmpacket_descriptor d;
  351. u64 total_bytes;
  352. u32 offset;
  353. u32 byte_cnt;
  354. unsigned char data[1];
  355. } __packed;
  356. union vmpacket_largest_possible_header {
  357. struct vmpacket_descriptor simple_hdr;
  358. struct vmtransfer_page_packet_header xfer_page_hdr;
  359. struct vmgpadl_packet_header gpadl_hdr;
  360. struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
  361. struct vmestablish_gpadl establish_gpadl_hdr;
  362. struct vmteardown_gpadl teardown_gpadl_hdr;
  363. struct vmdata_gpa_direct data_gpa_direct_hdr;
  364. };
   365. #define VMPACKET_DATA_START_ADDRESS(__packet) \
   366. (void *)(((unsigned char *)__packet) + \
   367. ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
   368. #define VMPACKET_DATA_LENGTH(__packet) \
   369. ((((struct vmpacket_descriptor *)__packet)->len8 - \
   370. ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
   371. #define VMPACKET_TRANSFER_MODE(__packet) \
   372. (((struct vmpacket_descriptor *)__packet)->type)
  373. enum vmbus_packet_type {
  374. VM_PKT_INVALID = 0x0,
  375. VM_PKT_SYNCH = 0x1,
  376. VM_PKT_ADD_XFER_PAGESET = 0x2,
  377. VM_PKT_RM_XFER_PAGESET = 0x3,
  378. VM_PKT_ESTABLISH_GPADL = 0x4,
  379. VM_PKT_TEARDOWN_GPADL = 0x5,
  380. VM_PKT_DATA_INBAND = 0x6,
  381. VM_PKT_DATA_USING_XFER_PAGES = 0x7,
  382. VM_PKT_DATA_USING_GPADL = 0x8,
  383. VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
  384. VM_PKT_CANCEL_REQUEST = 0xa,
  385. VM_PKT_COMP = 0xb,
  386. VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
  387. VM_PKT_ADDITIONAL_DATA = 0xd
  388. };
  389. #define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
  390. /* Version 1 messages */
  391. enum vmbus_channel_message_type {
  392. CHANNELMSG_INVALID = 0,
  393. CHANNELMSG_OFFERCHANNEL = 1,
  394. CHANNELMSG_RESCIND_CHANNELOFFER = 2,
  395. CHANNELMSG_REQUESTOFFERS = 3,
  396. CHANNELMSG_ALLOFFERS_DELIVERED = 4,
  397. CHANNELMSG_OPENCHANNEL = 5,
  398. CHANNELMSG_OPENCHANNEL_RESULT = 6,
  399. CHANNELMSG_CLOSECHANNEL = 7,
  400. CHANNELMSG_GPADL_HEADER = 8,
  401. CHANNELMSG_GPADL_BODY = 9,
  402. CHANNELMSG_GPADL_CREATED = 10,
  403. CHANNELMSG_GPADL_TEARDOWN = 11,
  404. CHANNELMSG_GPADL_TORNDOWN = 12,
  405. CHANNELMSG_RELID_RELEASED = 13,
  406. CHANNELMSG_INITIATE_CONTACT = 14,
  407. CHANNELMSG_VERSION_RESPONSE = 15,
  408. CHANNELMSG_UNLOAD = 16,
  409. CHANNELMSG_UNLOAD_RESPONSE = 17,
  410. CHANNELMSG_18 = 18,
  411. CHANNELMSG_19 = 19,
  412. CHANNELMSG_20 = 20,
  413. CHANNELMSG_TL_CONNECT_REQUEST = 21,
  414. CHANNELMSG_MODIFYCHANNEL = 22,
  415. CHANNELMSG_TL_CONNECT_RESULT = 23,
  416. CHANNELMSG_COUNT
  417. };
  418. /* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
  419. #define INVALID_RELID U32_MAX
  420. struct vmbus_channel_message_header {
  421. enum vmbus_channel_message_type msgtype;
  422. u32 padding;
  423. } __packed;
  424. /* Query VMBus Version parameters */
  425. struct vmbus_channel_query_vmbus_version {
  426. struct vmbus_channel_message_header header;
  427. u32 version;
  428. } __packed;
  429. /* VMBus Version Supported parameters */
  430. struct vmbus_channel_version_supported {
  431. struct vmbus_channel_message_header header;
  432. u8 version_supported;
  433. } __packed;
  434. /* Offer Channel parameters */
  435. struct vmbus_channel_offer_channel {
  436. struct vmbus_channel_message_header header;
  437. struct vmbus_channel_offer offer;
  438. u32 child_relid;
  439. u8 monitorid;
  440. /*
   441. * win7 and beyond split this field into a bit field.
  442. */
  443. u8 monitor_allocated:1;
  444. u8 reserved:7;
  445. /*
  446. * These are new fields added in win7 and later.
  447. * Do not access these fields without checking the
  448. * negotiated protocol.
  449. *
  450. * If "is_dedicated_interrupt" is set, we must not set the
  451. * associated bit in the channel bitmap while sending the
  452. * interrupt to the host.
  453. *
  454. * connection_id is to be used in signaling the host.
  455. */
  456. u16 is_dedicated_interrupt:1;
  457. u16 reserved1:15;
  458. u32 connection_id;
  459. } __packed;
  460. /* Rescind Offer parameters */
  461. struct vmbus_channel_rescind_offer {
  462. struct vmbus_channel_message_header header;
  463. u32 child_relid;
  464. } __packed;
  465. static inline u32
  466. hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
  467. {
  468. return rbi->ring_buffer->pending_send_sz;
  469. }
  470. /*
  471. * Request Offer -- no parameters, SynIC message contains the partition ID
  472. * Set Snoop -- no parameters, SynIC message contains the partition ID
  473. * Clear Snoop -- no parameters, SynIC message contains the partition ID
  474. * All Offers Delivered -- no parameters, SynIC message contains the partition
  475. * ID
  476. * Flush Client -- no parameters, SynIC message contains the partition ID
  477. */
  478. /* Open Channel parameters */
  479. struct vmbus_channel_open_channel {
  480. struct vmbus_channel_message_header header;
  481. /* Identifies the specific VMBus channel that is being opened. */
  482. u32 child_relid;
  483. /* ID making a particular open request at a channel offer unique. */
  484. u32 openid;
  485. /* GPADL for the channel's ring buffer. */
  486. u32 ringbuffer_gpadlhandle;
  487. /*
  488. * Starting with win8, this field will be used to specify
  489. * the target virtual processor on which to deliver the interrupt for
  490. * the host to guest communication.
  491. * Prior to win8, incoming channel interrupts would only
  492. * be delivered on cpu 0. Setting this value to 0 would
  493. * preserve the earlier behavior.
  494. */
  495. u32 target_vp;
  496. /*
  497. * The upstream ring buffer begins at offset zero in the memory
  498. * described by RingBufferGpadlHandle. The downstream ring buffer
  499. * follows it at this offset (in pages).
  500. */
  501. u32 downstream_ringbuffer_pageoffset;
  502. /* User-specific data to be passed along to the server endpoint. */
  503. unsigned char userdata[MAX_USER_DEFINED_BYTES];
  504. } __packed;
  505. /* Open Channel Result parameters */
  506. struct vmbus_channel_open_result {
  507. struct vmbus_channel_message_header header;
  508. u32 child_relid;
  509. u32 openid;
  510. u32 status;
  511. } __packed;
  512. /* Close channel parameters; */
  513. struct vmbus_channel_close_channel {
  514. struct vmbus_channel_message_header header;
  515. u32 child_relid;
  516. } __packed;
  517. /* Channel Message GPADL */
  518. #define GPADL_TYPE_RING_BUFFER 1
  519. #define GPADL_TYPE_SERVER_SAVE_AREA 2
  520. #define GPADL_TYPE_TRANSACTION 8
  521. /*
  522. * The number of PFNs in a GPADL message is defined by the number of
  523. * pages that would be spanned by ByteCount and ByteOffset. If the
  524. * implied number of PFNs won't fit in this packet, there will be a
  525. * follow-up packet that contains more.
  526. */
  527. struct vmbus_channel_gpadl_header {
  528. struct vmbus_channel_message_header header;
  529. u32 child_relid;
  530. u32 gpadl;
  531. u16 range_buflen;
  532. u16 rangecount;
  533. struct gpa_range range[];
  534. } __packed;
  535. /* This is the followup packet that contains more PFNs. */
  536. struct vmbus_channel_gpadl_body {
  537. struct vmbus_channel_message_header header;
  538. u32 msgnumber;
  539. u32 gpadl;
  540. u64 pfn[];
  541. } __packed;
  542. struct vmbus_channel_gpadl_created {
  543. struct vmbus_channel_message_header header;
  544. u32 child_relid;
  545. u32 gpadl;
  546. u32 creation_status;
  547. } __packed;
  548. struct vmbus_channel_gpadl_teardown {
  549. struct vmbus_channel_message_header header;
  550. u32 child_relid;
  551. u32 gpadl;
  552. } __packed;
  553. struct vmbus_channel_gpadl_torndown {
  554. struct vmbus_channel_message_header header;
  555. u32 gpadl;
  556. } __packed;
  557. struct vmbus_channel_relid_released {
  558. struct vmbus_channel_message_header header;
  559. u32 child_relid;
  560. } __packed;
  561. struct vmbus_channel_initiate_contact {
  562. struct vmbus_channel_message_header header;
  563. u32 vmbus_version_requested;
  564. u32 target_vcpu; /* The VCPU the host should respond to */
  565. union {
  566. u64 interrupt_page;
  567. struct {
  568. u8 msg_sint;
  569. u8 padding1[3];
  570. u32 padding2;
  571. };
  572. };
  573. u64 monitor_page1;
  574. u64 monitor_page2;
  575. } __packed;
  576. /* Hyper-V socket: guest's connect()-ing to host */
  577. struct vmbus_channel_tl_connect_request {
  578. struct vmbus_channel_message_header header;
  579. guid_t guest_endpoint_id;
  580. guid_t host_service_id;
  581. } __packed;
  582. /* Modify Channel parameters, cf. vmbus_send_modifychannel() */
  583. struct vmbus_channel_modifychannel {
  584. struct vmbus_channel_message_header header;
  585. u32 child_relid;
  586. u32 target_vp;
  587. } __packed;
  588. struct vmbus_channel_version_response {
  589. struct vmbus_channel_message_header header;
  590. u8 version_supported;
  591. u8 connection_state;
  592. u16 padding;
  593. /*
  594. * On new hosts that support VMBus protocol 5.0, we must use
  595. * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
  596. * and for subsequent messages, we must use the Message Connection ID
  597. * field in the host-returned Version Response Message.
  598. *
  599. * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
  600. */
  601. u32 msg_conn_id;
  602. } __packed;
  603. enum vmbus_channel_state {
  604. CHANNEL_OFFER_STATE,
  605. CHANNEL_OPENING_STATE,
  606. CHANNEL_OPEN_STATE,
  607. CHANNEL_OPENED_STATE,
  608. };
  609. /*
   610. * Represents each channel msg on the vmbus connection. This is a
   611. * variable-size data structure depending on the msg type itself.
  612. */
  613. struct vmbus_channel_msginfo {
  614. /* Bookkeeping stuff */
  615. struct list_head msglistentry;
  616. /* So far, this is only used to handle gpadl body message */
  617. struct list_head submsglist;
  618. /* Synchronize the request/response if needed */
  619. struct completion waitevent;
  620. struct vmbus_channel *waiting_channel;
  621. union {
  622. struct vmbus_channel_version_supported version_supported;
  623. struct vmbus_channel_open_result open_result;
  624. struct vmbus_channel_gpadl_torndown gpadl_torndown;
  625. struct vmbus_channel_gpadl_created gpadl_created;
  626. struct vmbus_channel_version_response version_response;
  627. } response;
  628. u32 msgsize;
  629. /*
  630. * The channel message that goes out on the "wire".
  631. * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
  632. */
  633. unsigned char msg[];
  634. };
  635. struct vmbus_close_msg {
  636. struct vmbus_channel_msginfo info;
  637. struct vmbus_channel_close_channel msg;
  638. };
  639. /* Define connection identifier type. */
  640. union hv_connection_id {
  641. u32 asu32;
  642. struct {
  643. u32 id:24;
  644. u32 reserved:8;
  645. } u;
  646. };
  647. enum vmbus_device_type {
  648. HV_IDE = 0,
  649. HV_SCSI,
  650. HV_FC,
  651. HV_NIC,
  652. HV_ND,
  653. HV_PCIE,
  654. HV_FB,
  655. HV_KBD,
  656. HV_MOUSE,
  657. HV_KVP,
  658. HV_TS,
  659. HV_HB,
  660. HV_SHUTDOWN,
  661. HV_FCOPY,
  662. HV_BACKUP,
  663. HV_DM,
  664. HV_UNKNOWN,
  665. };
  666. struct vmbus_device {
  667. u16 dev_type;
  668. guid_t guid;
  669. bool perf_device;
  670. };
  671. struct vmbus_channel {
  672. struct list_head listentry;
  673. struct hv_device *device_obj;
  674. enum vmbus_channel_state state;
  675. struct vmbus_channel_offer_channel offermsg;
  676. /*
  677. * These are based on the OfferMsg.MonitorId.
  678. * Save it here for easy access.
  679. */
  680. u8 monitor_grp;
  681. u8 monitor_bit;
  682. bool rescind; /* got rescind msg */
  683. struct completion rescind_event;
  684. u32 ringbuffer_gpadlhandle;
  685. /* Allocated memory for ring buffer */
  686. struct page *ringbuffer_page;
  687. u32 ringbuffer_pagecount;
  688. u32 ringbuffer_send_offset;
  689. struct hv_ring_buffer_info outbound; /* send to parent */
  690. struct hv_ring_buffer_info inbound; /* receive from parent */
  691. struct vmbus_close_msg close_msg;
  692. /* Statistics */
  693. u64 interrupts; /* Host to Guest interrupts */
  694. u64 sig_events; /* Guest to Host events */
  695. /*
  696. * Guest to host interrupts caused by the outbound ring buffer changing
  697. * from empty to not empty.
  698. */
  699. u64 intr_out_empty;
  700. /*
  701. * Indicates that a full outbound ring buffer was encountered. The flag
  702. * is set to true when a full outbound ring buffer is encountered and
  703. * set to false when a write to the outbound ring buffer is completed.
  704. */
  705. bool out_full_flag;
  706. /* Channel callback's invoked in softirq context */
  707. struct tasklet_struct callback_event;
  708. void (*onchannel_callback)(void *context);
  709. void *channel_callback_context;
  710. void (*change_target_cpu_callback)(struct vmbus_channel *channel,
  711. u32 old, u32 new);
  712. /*
  713. * Synchronize channel scheduling and channel removal; see the inline
  714. * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
  715. */
  716. spinlock_t sched_lock;
  717. /*
  718. * A channel can be marked for one of three modes of reading:
   719. * BATCHED - callback called from tasklet and should read
  720. * channel until empty. Interrupts from the host
  721. * are masked while read is in process (default).
  722. * DIRECT - callback called from tasklet (softirq).
  723. * ISR - callback called in interrupt context and must
  724. * invoke its own deferred processing.
  725. * Host interrupts are disabled and must be re-enabled
  726. * when ring is empty.
  727. */
  728. enum hv_callback_mode {
  729. HV_CALL_BATCHED,
  730. HV_CALL_DIRECT,
  731. HV_CALL_ISR
  732. } callback_mode;
  733. bool is_dedicated_interrupt;
  734. u64 sig_event;
  735. /*
  736. * Starting with win8, this field will be used to specify the
  737. * target CPU on which to deliver the interrupt for the host
  738. * to guest communication.
  739. *
  740. * Prior to win8, incoming channel interrupts would only be
  741. * delivered on CPU 0. Setting this value to 0 would preserve
  742. * the earlier behavior.
  743. */
  744. u32 target_cpu;
  745. /*
  746. * Support for sub-channels. For high performance devices,
  747. * it will be useful to have multiple sub-channels to support
  748. * a scalable communication infrastructure with the host.
   749. * The support for sub-channels is implemented as an extension
  750. * to the current infrastructure.
  751. * The initial offer is considered the primary channel and this
  752. * offer message will indicate if the host supports sub-channels.
   753. * The guest is free to ask for sub-channels to be offered and can
  754. * open these sub-channels as a normal "primary" channel. However,
  755. * all sub-channels will have the same type and instance guids as the
  756. * primary channel. Requests sent on a given channel will result in a
  757. * response on the same channel.
  758. */
  759. /*
  760. * Sub-channel creation callback. This callback will be called in
  761. * process context when a sub-channel offer is received from the host.
  762. * The guest can open the sub-channel in the context of this callback.
  763. */
  764. void (*sc_creation_callback)(struct vmbus_channel *new_sc);
  765. /*
  766. * Channel rescind callback. Some channels (the hvsock ones), need to
  767. * register a callback which is invoked in vmbus_onoffer_rescind().
  768. */
  769. void (*chn_rescind_callback)(struct vmbus_channel *channel);
  770. /*
  771. * All Sub-channels of a primary channel are linked here.
  772. */
  773. struct list_head sc_list;
  774. /*
  775. * The primary channel this sub-channel belongs to.
  776. * This will be NULL for the primary channel.
  777. */
  778. struct vmbus_channel *primary_channel;
  779. /*
  780. * Support per-channel state for use by vmbus drivers.
  781. */
  782. void *per_channel_state;
  783. /*
  784. * Defer freeing channel until after all cpu's have
  785. * gone through grace period.
  786. */
  787. struct rcu_head rcu;
  788. /*
  789. * For sysfs per-channel properties.
  790. */
  791. struct kobject kobj;
  792. /*
  793. * For performance critical channels (storage, networking
  794. * etc,), Hyper-V has a mechanism to enhance the throughput
  795. * at the expense of latency:
  796. * When the host is to be signaled, we just set a bit in a shared page
  797. * and this bit will be inspected by the hypervisor within a certain
  798. * window and if the bit is set, the host will be signaled. The window
  799. * of time is the monitor latency - currently around 100 usecs. This
  800. * mechanism improves throughput by:
  801. *
  802. * A) Making the host more efficient - each time it wakes up,
   803. * it can potentially process more packets. The
  804. * monitor latency allows a batch to build up.
  805. * B) By deferring the hypercall to signal, we will also minimize
  806. * the interrupts.
  807. *
  808. * Clearly, these optimizations improve throughput at the expense of
  809. * latency. Furthermore, since the channel is shared for both
  810. * control and data messages, control messages currently suffer
   811. * unnecessary latency, adversely impacting performance and boot
  812. * time. To fix this issue, permit tagging the channel as being
  813. * in "low latency" mode. In this mode, we will bypass the monitor
  814. * mechanism.
  815. */
  816. bool low_latency;
  817. bool probe_done;
  818. /*
  819. * Cache the device ID here for easy access; this is useful, in
  820. * particular, in situations where the channel's device_obj has
  821. * not been allocated/initialized yet.
  822. */
  823. u16 device_id;
  824. /*
  825. * We must offload the handling of the primary/sub channels
  826. * from the single-threaded vmbus_connection.work_queue to
   827. * two different workqueues; otherwise we can block
  828. * vmbus_connection.work_queue and hang: see vmbus_process_offer().
  829. */
  830. struct work_struct add_channel_work;
  831. /*
  832. * Guest to host interrupts caused by the inbound ring buffer changing
  833. * from full to not full while a packet is waiting.
  834. */
  835. u64 intr_in_full;
  836. /*
  837. * The total number of write operations that encountered a full
  838. * outbound ring buffer.
  839. */
  840. u64 out_full_total;
  841. /*
  842. * The number of write operations that were the first to encounter a
  843. * full outbound ring buffer.
  844. */
  845. u64 out_full_first;
  846. /* enabling/disabling fuzz testing on the channel (default is false)*/
  847. bool fuzz_testing_state;
  848. /*
  849. * Interrupt delay will delay the guest from emptying the ring buffer
  850. * for a specific amount of time. The delay is in microseconds and will
   851. * be between 1 and a maximum of 1000; the default is 0 (no delay).
   852. * The message delay will delay guest reading on a per-message basis
   853. * in microseconds, between 1 and 1000, with the default being 0
  854. * (no delay).
  855. */
  856. u32 fuzz_testing_interrupt_delay;
  857. u32 fuzz_testing_message_delay;
  858. };
  859. static inline bool is_hvsock_channel(const struct vmbus_channel *c)
  860. {
  861. return !!(c->offermsg.offer.chn_flags &
  862. VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
  863. }
  864. static inline bool is_sub_channel(const struct vmbus_channel *c)
  865. {
  866. return c->offermsg.offer.sub_channel_index != 0;
  867. }
  868. static inline void set_channel_read_mode(struct vmbus_channel *c,
  869. enum hv_callback_mode mode)
  870. {
  871. c->callback_mode = mode;
  872. }
  873. static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
  874. {
  875. c->per_channel_state = s;
  876. }
  877. static inline void *get_per_channel_state(struct vmbus_channel *c)
  878. {
  879. return c->per_channel_state;
  880. }
  881. static inline void set_channel_pending_send_size(struct vmbus_channel *c,
  882. u32 size)
  883. {
  884. unsigned long flags;
  885. if (size) {
  886. spin_lock_irqsave(&c->outbound.ring_lock, flags);
  887. ++c->out_full_total;
  888. if (!c->out_full_flag) {
  889. ++c->out_full_first;
  890. c->out_full_flag = true;
  891. }
  892. spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
  893. } else {
  894. c->out_full_flag = false;
  895. }
  896. c->outbound.ring_buffer->pending_send_sz = size;
  897. }
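/*
 * Illustrative sketch (not from the original header; names are made up and
 * it is kept under #if 0 as illustration only): a producer typically calls
 * set_channel_pending_send_size() when the outbound ring is too full for the
 * next packet (the send path normally reports this with -EAGAIN), so the
 * host interrupts the guest once enough space has been freed.
 */
#if 0
static int example_try_send(struct vmbus_channel *chan, void *pkt, u32 len)
{
	int ret;

	ret = vmbus_sendpacket(chan, pkt, len, 0 /* requestid */,
			       VM_PKT_DATA_INBAND, 0);
	if (ret == -EAGAIN)
		/* Ask the host to signal us once 'len' bytes are free. */
		set_channel_pending_send_size(chan, len);
	return ret;
}
#endif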
  898. static inline void set_low_latency_mode(struct vmbus_channel *c)
  899. {
  900. c->low_latency = true;
  901. }
  902. static inline void clear_low_latency_mode(struct vmbus_channel *c)
  903. {
  904. c->low_latency = false;
  905. }
  906. void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
  907. int vmbus_request_offers(void);
  908. /*
  909. * APIs for managing sub-channels.
  910. */
  911. void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
  912. void (*sc_cr_cb)(struct vmbus_channel *new_sc));
  913. void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
  914. void (*chn_rescind_cb)(struct vmbus_channel *));
  915. /*
   916. * Check if sub-channels have already been offered. This API will be useful
  917. * when the driver is unloaded after establishing sub-channels. In this case,
  918. * when the driver is re-loaded, the driver would have to check if the
  919. * subchannels have already been established before attempting to request
  920. * the creation of sub-channels.
  921. * This function returns TRUE to indicate that subchannels have already been
  922. * created.
  923. * This function should be invoked after setting the callback function for
  924. * sub-channel creation.
  925. */
  926. bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
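/*
 * Illustrative sketch (not from the original header; names are made up and
 * it is kept under #if 0 as illustration only): a driver registers its
 * sub-channel creation callback on the primary channel first, and may use
 * vmbus_are_subchannels_present() after a re-load to see whether the host
 * has already offered the sub-channels.
 */
#if 0
static void example_sc_created(struct vmbus_channel *new_sc)
{
	/* Runs in process context; the sub-channel may be opened here. */
}

static void example_setup_subchannels(struct vmbus_channel *primary)
{
	vmbus_set_sc_create_callback(primary, example_sc_created);
	if (vmbus_are_subchannels_present(primary))
		pr_info("sub-channels already offered by the host\n");
}
#endif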
  927. /* The format must be the same as struct vmdata_gpa_direct */
  928. struct vmbus_channel_packet_page_buffer {
  929. u16 type;
  930. u16 dataoffset8;
  931. u16 length8;
  932. u16 flags;
  933. u64 transactionid;
  934. u32 reserved;
  935. u32 rangecount;
  936. struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
  937. } __packed;
  938. /* The format must be the same as struct vmdata_gpa_direct */
  939. struct vmbus_channel_packet_multipage_buffer {
  940. u16 type;
  941. u16 dataoffset8;
  942. u16 length8;
  943. u16 flags;
  944. u64 transactionid;
  945. u32 reserved;
  946. u32 rangecount; /* Always 1 in this case */
  947. struct hv_multipage_buffer range;
  948. } __packed;
  949. /* The format must be the same as struct vmdata_gpa_direct */
  950. struct vmbus_packet_mpb_array {
  951. u16 type;
  952. u16 dataoffset8;
  953. u16 length8;
  954. u16 flags;
  955. u64 transactionid;
  956. u32 reserved;
  957. u32 rangecount; /* Always 1 in this case */
  958. struct hv_mpb_array range;
  959. } __packed;
  960. int vmbus_alloc_ring(struct vmbus_channel *channel,
  961. u32 send_size, u32 recv_size);
  962. void vmbus_free_ring(struct vmbus_channel *channel);
  963. int vmbus_connect_ring(struct vmbus_channel *channel,
  964. void (*onchannel_callback)(void *context),
  965. void *context);
  966. int vmbus_disconnect_ring(struct vmbus_channel *channel);
  967. extern int vmbus_open(struct vmbus_channel *channel,
  968. u32 send_ringbuffersize,
  969. u32 recv_ringbuffersize,
  970. void *userdata,
  971. u32 userdatalen,
  972. void (*onchannel_callback)(void *context),
  973. void *context);
  974. extern void vmbus_close(struct vmbus_channel *channel);
  975. extern int vmbus_sendpacket(struct vmbus_channel *channel,
  976. void *buffer,
  977. u32 bufferLen,
  978. u64 requestid,
  979. enum vmbus_packet_type type,
  980. u32 flags);
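/*
 * Illustrative sketch (not from the original header; names are made up and
 * it is kept under #if 0 as illustration only): the usual life cycle is
 * vmbus_open() with ring sizes computed via VMBUS_RING_SIZE(), sends on the
 * outbound ring, and vmbus_close() on teardown. The channel callback runs
 * whenever the host signals new inbound data.
 */
#if 0
static void example_onchannel(void *ctx)
{
	/* Drain the inbound ring here, e.g. with foreach_vmbus_pkt(). */
}

static int example_open(struct hv_device *dev)
{
	struct vmbus_channel *chan = dev->channel;
	u32 ringsize = VMBUS_RING_SIZE(16 * 1024);
	int ret;

	ret = vmbus_open(chan, ringsize, ringsize, NULL, 0,
			 example_onchannel, dev);
	if (ret)
		return ret;

	/* ... vmbus_sendpacket(chan, ...) as needed ... */

	vmbus_close(chan);	/* teardown */
	return 0;
}
#endif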
  981. extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
  982. struct hv_page_buffer pagebuffers[],
  983. u32 pagecount,
  984. void *buffer,
  985. u32 bufferlen,
  986. u64 requestid);
  987. extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
  988. struct vmbus_packet_mpb_array *mpb,
  989. u32 desc_size,
  990. void *buffer,
  991. u32 bufferlen,
  992. u64 requestid);
  993. extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
  994. void *kbuffer,
  995. u32 size,
  996. u32 *gpadl_handle);
  997. extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
  998. u32 gpadl_handle);
  999. void vmbus_reset_channel_cb(struct vmbus_channel *channel);
  1000. extern int vmbus_recvpacket(struct vmbus_channel *channel,
  1001. void *buffer,
  1002. u32 bufferlen,
  1003. u32 *buffer_actual_len,
  1004. u64 *requestid);
  1005. extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
  1006. void *buffer,
  1007. u32 bufferlen,
  1008. u32 *buffer_actual_len,
  1009. u64 *requestid);
  1010. extern void vmbus_ontimer(unsigned long data);
  1011. /* Base driver object */
  1012. struct hv_driver {
  1013. const char *name;
  1014. /*
  1015. * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
  1016. * channel flag, actually doesn't mean a synthetic device because the
  1017. * offer's if_type/if_instance can change for every new hvsock
  1018. * connection.
  1019. *
  1020. * However, to facilitate the notification of new-offer/rescind-offer
  1021. * from vmbus driver to hvsock driver, we can handle hvsock offer as
  1022. * a special vmbus device, and hence we need the below flag to
  1023. * indicate if the driver is the hvsock driver or not: we need to
   1024. * specially treat the hvsock offer & driver in vmbus_match().
  1025. */
  1026. bool hvsock;
  1027. /* the device type supported by this driver */
  1028. guid_t dev_type;
  1029. const struct hv_vmbus_device_id *id_table;
  1030. struct device_driver driver;
  1031. /* dynamic device GUID's */
  1032. struct {
  1033. spinlock_t lock;
  1034. struct list_head list;
  1035. } dynids;
  1036. int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
  1037. int (*remove)(struct hv_device *);
  1038. void (*shutdown)(struct hv_device *);
  1039. int (*suspend)(struct hv_device *);
  1040. int (*resume)(struct hv_device *);
  1041. };
  1042. /* Base device object */
  1043. struct hv_device {
  1044. /* the device type id of this device */
  1045. guid_t dev_type;
  1046. /* the device instance id of this device */
  1047. guid_t dev_instance;
  1048. u16 vendor_id;
  1049. u16 device_id;
  1050. struct device device;
  1051. char *driver_override; /* Driver name to force a match */
  1052. struct vmbus_channel *channel;
  1053. struct kset *channels_kset;
  1054. /* place holder to keep track of the dir for hv device in debugfs */
  1055. struct dentry *debug_dir;
  1056. };
  1057. static inline struct hv_device *device_to_hv_device(struct device *d)
  1058. {
  1059. return container_of(d, struct hv_device, device);
  1060. }
  1061. static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
  1062. {
  1063. return container_of(d, struct hv_driver, driver);
  1064. }
  1065. static inline void hv_set_drvdata(struct hv_device *dev, void *data)
  1066. {
  1067. dev_set_drvdata(&dev->device, data);
  1068. }
  1069. static inline void *hv_get_drvdata(struct hv_device *dev)
  1070. {
  1071. return dev_get_drvdata(&dev->device);
  1072. }
  1073. struct hv_ring_buffer_debug_info {
  1074. u32 current_interrupt_mask;
  1075. u32 current_read_index;
  1076. u32 current_write_index;
  1077. u32 bytes_avail_toread;
  1078. u32 bytes_avail_towrite;
  1079. };
  1080. int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
  1081. struct hv_ring_buffer_debug_info *debug_info);
  1082. /* Vmbus interface */
  1083. #define vmbus_driver_register(driver) \
  1084. __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
  1085. int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
  1086. struct module *owner,
  1087. const char *mod_name);
  1088. void vmbus_driver_unregister(struct hv_driver *hv_driver);
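/*
 * Illustrative sketch (not from the original header; names are made up and
 * it is kept under #if 0 as illustration only): a minimal VMBus driver fills
 * in an id_table using one of the GUID macros below and registers with
 * vmbus_driver_register(); probe() is then called with the matching
 * hv_device.
 */
#if 0
static const struct hv_vmbus_device_id example_id_table[] = {
	{ HV_NIC_GUID, },
	{ },
};

static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *id)
{
	return 0;
}

static int example_remove(struct hv_device *dev)
{
	return 0;
}

static struct hv_driver example_drv = {
	.name		= "example",
	.id_table	= example_id_table,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* module init would call: vmbus_driver_register(&example_drv); */
#endif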
  1089. void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
  1090. int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
  1091. resource_size_t min, resource_size_t max,
  1092. resource_size_t size, resource_size_t align,
  1093. bool fb_overlap_ok);
  1094. void vmbus_free_mmio(resource_size_t start, resource_size_t size);
  1095. /*
  1096. * GUID definitions of various offer types - services offered to the guest.
  1097. */
  1098. /*
  1099. * Network GUID
  1100. * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
  1101. */
  1102. #define HV_NIC_GUID \
  1103. .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
  1104. 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
  1105. /*
  1106. * IDE GUID
  1107. * {32412632-86cb-44a2-9b5c-50d1417354f5}
  1108. */
  1109. #define HV_IDE_GUID \
  1110. .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
  1111. 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
  1112. /*
  1113. * SCSI GUID
  1114. * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
  1115. */
  1116. #define HV_SCSI_GUID \
  1117. .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
  1118. 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
  1119. /*
  1120. * Shutdown GUID
  1121. * {0e0b6031-5213-4934-818b-38d90ced39db}
  1122. */
  1123. #define HV_SHUTDOWN_GUID \
  1124. .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
  1125. 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
  1126. /*
  1127. * Time Synch GUID
  1128. * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
  1129. */
  1130. #define HV_TS_GUID \
  1131. .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
  1132. 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
  1133. /*
  1134. * Heartbeat GUID
  1135. * {57164f39-9115-4e78-ab55-382f3bd5422d}
  1136. */
  1137. #define HV_HEART_BEAT_GUID \
  1138. .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
  1139. 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
  1140. /*
  1141. * KVP GUID
  1142. * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
  1143. */
  1144. #define HV_KVP_GUID \
  1145. .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
  1146. 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
  1147. /*
  1148. * Dynamic memory GUID
  1149. * {525074dc-8985-46e2-8057-a307dc18a502}
  1150. */
  1151. #define HV_DM_GUID \
  1152. .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
  1153. 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
  1154. /*
  1155. * Mouse GUID
  1156. * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
  1157. */
  1158. #define HV_MOUSE_GUID \
  1159. .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
  1160. 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
  1161. /*
  1162. * Keyboard GUID
  1163. * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
  1164. */
  1165. #define HV_KBD_GUID \
  1166. .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
  1167. 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
  1168. /*
  1169. * VSS (Backup/Restore) GUID
  1170. */
  1171. #define HV_VSS_GUID \
  1172. .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
  1173. 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
  1174. /*
  1175. * Synthetic Video GUID
  1176. * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
  1177. */
  1178. #define HV_SYNTHVID_GUID \
  1179. .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
  1180. 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
  1181. /*
  1182. * Synthetic FC GUID
  1183. * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
  1184. */
  1185. #define HV_SYNTHFC_GUID \
  1186. .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
  1187. 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
  1188. /*
  1189. * Guest File Copy Service
  1190. * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
  1191. */
  1192. #define HV_FCOPY_GUID \
  1193. .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
  1194. 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
  1195. /*
  1196. * NetworkDirect. This is the guest RDMA service.
  1197. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
  1198. */
  1199. #define HV_ND_GUID \
  1200. .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
  1201. 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
  1202. /*
  1203. * PCI Express Pass Through
  1204. * {44C4F61D-4444-4400-9D52-802E27EDE19F}
  1205. */
  1206. #define HV_PCIE_GUID \
  1207. .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
  1208. 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
  1209. /*
   1210. * Linux doesn't support these 3 devices: the first two are for
  1211. * Automatic Virtual Machine Activation, and the third is for
  1212. * Remote Desktop Virtualization.
  1213. * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
  1214. * {3375baf4-9e15-4b30-b765-67acb10d607b}
  1215. * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
  1216. */
  1217. #define HV_AVMA1_GUID \
  1218. .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
  1219. 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
  1220. #define HV_AVMA2_GUID \
  1221. .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
  1222. 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
  1223. #define HV_RDV_GUID \
  1224. .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
  1225. 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
  1226. /*
  1227. * Common header for Hyper-V ICs
  1228. */
  1229. #define ICMSGTYPE_NEGOTIATE 0
  1230. #define ICMSGTYPE_HEARTBEAT 1
  1231. #define ICMSGTYPE_KVPEXCHANGE 2
  1232. #define ICMSGTYPE_SHUTDOWN 3
  1233. #define ICMSGTYPE_TIMESYNC 4
  1234. #define ICMSGTYPE_VSS 5
  1235. #define ICMSGHDRFLAG_TRANSACTION 1
  1236. #define ICMSGHDRFLAG_REQUEST 2
  1237. #define ICMSGHDRFLAG_RESPONSE 4
  1238. /*
  1239. * While we want to handle util services as regular devices,
  1240. * there is only one instance of each of these services; so
  1241. * we statically allocate the service specific state.
  1242. */
  1243. struct hv_util_service {
  1244. u8 *recv_buffer;
  1245. void *channel;
  1246. void (*util_cb)(void *);
  1247. int (*util_init)(struct hv_util_service *);
  1248. void (*util_deinit)(void);
  1249. int (*util_pre_suspend)(void);
  1250. int (*util_pre_resume)(void);
  1251. };
  1252. struct vmbuspipe_hdr {
  1253. u32 flags;
  1254. u32 msgsize;
  1255. } __packed;
  1256. struct ic_version {
  1257. u16 major;
  1258. u16 minor;
  1259. } __packed;
  1260. struct icmsg_hdr {
  1261. struct ic_version icverframe;
  1262. u16 icmsgtype;
  1263. struct ic_version icvermsg;
  1264. u16 icmsgsize;
  1265. u32 status;
  1266. u8 ictransaction_id;
  1267. u8 icflags;
  1268. u8 reserved[2];
  1269. } __packed;
  1270. struct icmsg_negotiate {
  1271. u16 icframe_vercnt;
  1272. u16 icmsg_vercnt;
  1273. u32 reserved;
  1274. struct ic_version icversion_data[1]; /* any size array */
  1275. } __packed;
  1276. struct shutdown_msg_data {
  1277. u32 reason_code;
  1278. u32 timeout_seconds;
  1279. u32 flags;
  1280. u8 display_message[2048];
  1281. } __packed;
  1282. struct heartbeat_msg_data {
  1283. u64 seq_num;
  1284. u32 reserved[8];
  1285. } __packed;
  1286. /* Time Sync IC defs */
  1287. #define ICTIMESYNCFLAG_PROBE 0
  1288. #define ICTIMESYNCFLAG_SYNC 1
  1289. #define ICTIMESYNCFLAG_SAMPLE 2
  1290. #ifdef __x86_64__
  1291. #define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
  1292. #else
  1293. #define WLTIMEDELTA 116444736000000000LL
  1294. #endif
  1295. struct ictimesync_data {
  1296. u64 parenttime;
  1297. u64 childtime;
  1298. u64 roundtriptime;
  1299. u8 flags;
  1300. } __packed;
  1301. struct ictimesync_ref_data {
  1302. u64 parenttime;
  1303. u64 vmreferencetime;
  1304. u8 flags;
  1305. char leapflags;
  1306. char stratum;
  1307. u8 reserved[3];
  1308. } __packed;
  1309. struct hyperv_service_callback {
  1310. u8 msg_type;
  1311. char *log_msg;
  1312. guid_t data;
  1313. struct vmbus_channel *channel;
  1314. void (*callback)(void *context);
  1315. };
  1316. #define MAX_SRV_VER 0x7ffffff
  1317. extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
  1318. const int *fw_version, int fw_vercnt,
  1319. const int *srv_version, int srv_vercnt,
  1320. int *nego_fw_version, int *nego_srv_version);
  1321. void hv_process_channel_removal(struct vmbus_channel *channel);
  1322. void vmbus_setevent(struct vmbus_channel *channel);
  1323. /*
  1324. * Negotiated version with the Host.
  1325. */
  1326. extern __u32 vmbus_proto_version;
  1327. int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
  1328. const guid_t *shv_host_servie_id);
  1329. int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
  1330. void vmbus_set_event(struct vmbus_channel *channel);
  1331. /* Get the start of the ring buffer. */
  1332. static inline void *
  1333. hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
  1334. {
  1335. return ring_info->ring_buffer->buffer;
  1336. }
  1337. /*
  1338. * Mask off host interrupt callback notifications
  1339. */
  1340. static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
  1341. {
  1342. rbi->ring_buffer->interrupt_mask = 1;
  1343. /* make sure mask update is not reordered */
  1344. virt_mb();
  1345. }
  1346. /*
  1347. * Re-enable host callback and return number of outstanding bytes
  1348. */
  1349. static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
  1350. {
  1351. rbi->ring_buffer->interrupt_mask = 0;
  1352. /* make sure mask update is not reordered */
  1353. virt_mb();
  1354. /*
  1355. * Now check to see if the ring buffer is still empty.
  1356. * If it is not, we raced and we need to process new
  1357. * incoming messages.
  1358. */
  1359. return hv_get_bytes_to_read(rbi);
  1360. }
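/*
 * Illustrative sketch (not from the original header; names are made up and
 * it is kept under #if 0 as illustration only): the mask/unmask pair above
 * closes the race between "ring looks empty" and the host adding more data;
 * a reader loops until hv_end_read() reports that nothing new arrived while
 * interrupts were masked.
 */
#if 0
static void example_drain(struct hv_ring_buffer_info *rbi)
{
	do {
		hv_begin_read(rbi);
		/* ... consume all available packets ... */
	} while (hv_end_read(rbi));
}
#endif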
  1361. /*
  1362. * An API to support in-place processing of incoming VMBUS packets.
  1363. */
  1364. /* Get data payload associated with descriptor */
  1365. static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
  1366. {
  1367. return (void *)((unsigned long)desc + (desc->offset8 << 3));
  1368. }
  1369. /* Get data size associated with descriptor */
  1370. static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
  1371. {
  1372. return (desc->len8 << 3) - (desc->offset8 << 3);
  1373. }
  1374. struct vmpacket_descriptor *
  1375. hv_pkt_iter_first(struct vmbus_channel *channel);
  1376. struct vmpacket_descriptor *
  1377. __hv_pkt_iter_next(struct vmbus_channel *channel,
  1378. const struct vmpacket_descriptor *pkt);
  1379. void hv_pkt_iter_close(struct vmbus_channel *channel);
  1380. /*
  1381. * Get next packet descriptor from iterator
  1382. * If at end of list, return NULL and update host.
  1383. */
  1384. static inline struct vmpacket_descriptor *
  1385. hv_pkt_iter_next(struct vmbus_channel *channel,
  1386. const struct vmpacket_descriptor *pkt)
  1387. {
  1388. struct vmpacket_descriptor *nxt;
  1389. nxt = __hv_pkt_iter_next(channel, pkt);
  1390. if (!nxt)
  1391. hv_pkt_iter_close(channel);
  1392. return nxt;
  1393. }
  1394. #define foreach_vmbus_pkt(pkt, channel) \
  1395. for (pkt = hv_pkt_iter_first(channel); pkt; \
  1396. pkt = hv_pkt_iter_next(channel, pkt))
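/*
 * Illustrative sketch (not from the original header; names are made up and
 * it is kept under #if 0 as illustration only): in-place processing of the
 * inbound ring from a channel callback, using the iterator above together
 * with hv_pkt_data()/hv_pkt_datalen().
 */
#if 0
static void example_onchannel_cb(void *ctx)
{
	struct vmbus_channel *chan = ctx;
	const struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, chan) {
		void *data = hv_pkt_data(pkt);
		u32 len = hv_pkt_datalen(pkt);

		/* ... dispatch on pkt->type (e.g. VM_PKT_DATA_INBAND),
		 * consuming 'data'/'len' ... */
	}
}
#endif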
  1397. /*
  1398. * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
  1399. * sends requests to read and write blocks. Each block must be 128 bytes or
  1400. * smaller. Optionally, the VF driver can register a callback function which
  1401. * will be invoked when the host says that one or more of the first 64 block
  1402. * IDs is "invalid" which means that the VF driver should reread them.
  1403. */
  1404. #define HV_CONFIG_BLOCK_SIZE_MAX 128
  1405. int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
  1406. unsigned int block_id, unsigned int *bytes_returned);
  1407. int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
  1408. unsigned int block_id);
  1409. int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
  1410. void (*block_invalidate)(void *context,
  1411. u64 block_mask));
  1412. struct hyperv_pci_block_ops {
  1413. int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
  1414. unsigned int block_id, unsigned int *bytes_returned);
  1415. int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
  1416. unsigned int block_id);
  1417. int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
  1418. void (*block_invalidate)(void *context,
  1419. u64 block_mask));
  1420. };
  1421. extern struct hyperv_pci_block_ops hvpci_block_ops;
  1422. static inline unsigned long virt_to_hvpfn(void *addr)
  1423. {
  1424. phys_addr_t paddr;
  1425. if (is_vmalloc_addr(addr))
  1426. paddr = page_to_phys(vmalloc_to_page(addr)) +
  1427. offset_in_page(addr);
  1428. else
  1429. paddr = __pa(addr);
  1430. return paddr >> HV_HYP_PAGE_SHIFT;
  1431. }
  1432. #define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
  1433. #define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
  1434. #define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
  1435. #define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
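/*
 * Illustrative example (not from the original header): these helpers convert
 * between guest pages and the hypervisor's fixed 4 KiB pages. With a 64 KiB
 * guest PAGE_SIZE:
 *
 *   NR_HV_HYP_PAGES_IN_PAGE == 65536 / 4096 == 16
 *   page_to_hvpfn(page)     == page_to_pfn(page) * 16
 *   HVPFN_UP(0x2200)        == 3   (bytes rounded up to 4 KiB pages)
 *   offset_in_hvpage(ptr)   == (unsigned long)(ptr) & 0xfff
 */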
  1436. #endif /* _HYPERV_H */