vmbus_drv.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2009, Microsoft Corporation.
  4. *
  5. * Authors:
  6. * Haiyang Zhang <haiyangz@microsoft.com>
  7. * Hank Janssen <hjanssen@microsoft.com>
  8. * K. Y. Srinivasan <kys@microsoft.com>
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/init.h>
  12. #include <linux/module.h>
  13. #include <linux/device.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/sysctl.h>
  16. #include <linux/slab.h>
  17. #include <linux/acpi.h>
  18. #include <linux/completion.h>
  19. #include <linux/hyperv.h>
  20. #include <linux/kernel_stat.h>
  21. #include <linux/clockchips.h>
  22. #include <linux/cpu.h>
  23. #include <linux/sched/task_stack.h>
  24. #include <linux/delay.h>
  25. #include <linux/notifier.h>
  26. #include <linux/ptrace.h>
  27. #include <linux/screen_info.h>
  28. #include <linux/kdebug.h>
  29. #include <linux/efi.h>
  30. #include <linux/random.h>
  31. #include <linux/kernel.h>
  32. #include <linux/syscore_ops.h>
  33. #include <clocksource/hyperv_timer.h>
  34. #include "hyperv_vmbus.h"
  35. struct vmbus_dynid {
  36. struct list_head node;
  37. struct hv_vmbus_device_id id;
  38. };
  39. static struct acpi_device *hv_acpi_dev;
  40. static struct completion probe_event;
  41. static int hyperv_cpuhp_online;
  42. static void *hv_panic_page;
  43. /* Values parsed from ACPI DSDT */
  44. static int vmbus_irq;
  45. int vmbus_interrupt;
  46. /*
  47. * Boolean to control whether to report panic messages over Hyper-V.
  48. *
  49. * It can be set via /proc/sys/kernel/hyperv/record_panic_msg
  50. */
  51. static int sysctl_record_panic_msg = 1;
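/*
 * True when the panic/die notifiers should report through the crash MSRs,
 * i.e. when kmsg-dump based reporting is disabled via sysctl or the panic
 * message page was never allocated.
 */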
  52. static int hyperv_report_reg(void)
  53. {
  54. return !sysctl_record_panic_msg || !hv_panic_page;
  55. }
  56. static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
  57. void *args)
  58. {
  59. struct pt_regs *regs;
  60. vmbus_initiate_unload(true);
  61. /*
  62. * Hyper-V should be notified only once about a panic. If we will be
  63. * doing hyperv_report_panic_msg() later with kmsg data, don't do
  64. * the notification here.
  65. */
  66. if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
  67. && hyperv_report_reg()) {
  68. regs = current_pt_regs();
  69. hyperv_report_panic(regs, val, false);
  70. }
  71. return NOTIFY_DONE;
  72. }
  73. static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
  74. void *args)
  75. {
  76. struct die_args *die = args;
  77. struct pt_regs *regs = die->regs;
  78. /* Don't notify Hyper-V if the die event is other than oops */
  79. if (val != DIE_OOPS)
  80. return NOTIFY_DONE;
  81. /*
  82. * Hyper-V should be notified only once about a panic. If we will be
  83. * doing hyperv_report_panic_msg() later with kmsg data, don't do
  84. * the notification here.
  85. */
  86. if (hyperv_report_reg())
  87. hyperv_report_panic(regs, val, true);
  88. return NOTIFY_DONE;
  89. }
  90. static struct notifier_block hyperv_die_block = {
  91. .notifier_call = hyperv_die_event,
  92. };
  93. static struct notifier_block hyperv_panic_block = {
  94. .notifier_call = hyperv_panic_event,
  95. };
  96. static const char *fb_mmio_name = "fb_range";
  97. static struct resource *fb_mmio;
  98. static struct resource *hyperv_mmio;
  99. static DEFINE_MUTEX(hyperv_mmio_lock);
  100. static int vmbus_exists(void)
  101. {
  102. if (hv_acpi_dev == NULL)
  103. return -ENODEV;
  104. return 0;
  105. }
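/*
 * The monitor pages group channels in sets of 32: monitorid / 32 selects
 * the trigger group, monitorid % 32 the slot within that group.
 */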
  106. static u8 channel_monitor_group(const struct vmbus_channel *channel)
  107. {
  108. return (u8)channel->offermsg.monitorid / 32;
  109. }
  110. static u8 channel_monitor_offset(const struct vmbus_channel *channel)
  111. {
  112. return (u8)channel->offermsg.monitorid % 32;
  113. }
  114. static u32 channel_pending(const struct vmbus_channel *channel,
  115. const struct hv_monitor_page *monitor_page)
  116. {
  117. u8 monitor_group = channel_monitor_group(channel);
  118. return monitor_page->trigger_group[monitor_group].pending;
  119. }
  120. static u32 channel_latency(const struct vmbus_channel *channel,
  121. const struct hv_monitor_page *monitor_page)
  122. {
  123. u8 monitor_group = channel_monitor_group(channel);
  124. u8 monitor_offset = channel_monitor_offset(channel);
  125. return monitor_page->latency[monitor_group][monitor_offset];
  126. }
  127. static u32 channel_conn_id(struct vmbus_channel *channel,
  128. struct hv_monitor_page *monitor_page)
  129. {
  130. u8 monitor_group = channel_monitor_group(channel);
  131. u8 monitor_offset = channel_monitor_offset(channel);
  132. return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
  133. }
  134. static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
  135. char *buf)
  136. {
  137. struct hv_device *hv_dev = device_to_hv_device(dev);
  138. if (!hv_dev->channel)
  139. return -ENODEV;
  140. return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
  141. }
  142. static DEVICE_ATTR_RO(id);
  143. static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
  144. char *buf)
  145. {
  146. struct hv_device *hv_dev = device_to_hv_device(dev);
  147. if (!hv_dev->channel)
  148. return -ENODEV;
  149. return sprintf(buf, "%d\n", hv_dev->channel->state);
  150. }
  151. static DEVICE_ATTR_RO(state);
  152. static ssize_t monitor_id_show(struct device *dev,
  153. struct device_attribute *dev_attr, char *buf)
  154. {
  155. struct hv_device *hv_dev = device_to_hv_device(dev);
  156. if (!hv_dev->channel)
  157. return -ENODEV;
  158. return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
  159. }
  160. static DEVICE_ATTR_RO(monitor_id);
  161. static ssize_t class_id_show(struct device *dev,
  162. struct device_attribute *dev_attr, char *buf)
  163. {
  164. struct hv_device *hv_dev = device_to_hv_device(dev);
  165. if (!hv_dev->channel)
  166. return -ENODEV;
  167. return sprintf(buf, "{%pUl}\n",
  168. &hv_dev->channel->offermsg.offer.if_type);
  169. }
  170. static DEVICE_ATTR_RO(class_id);
  171. static ssize_t device_id_show(struct device *dev,
  172. struct device_attribute *dev_attr, char *buf)
  173. {
  174. struct hv_device *hv_dev = device_to_hv_device(dev);
  175. if (!hv_dev->channel)
  176. return -ENODEV;
  177. return sprintf(buf, "{%pUl}\n",
  178. &hv_dev->channel->offermsg.offer.if_instance);
  179. }
  180. static DEVICE_ATTR_RO(device_id);
  181. static ssize_t modalias_show(struct device *dev,
  182. struct device_attribute *dev_attr, char *buf)
  183. {
  184. struct hv_device *hv_dev = device_to_hv_device(dev);
  185. return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
  186. }
  187. static DEVICE_ATTR_RO(modalias);
  188. #ifdef CONFIG_NUMA
  189. static ssize_t numa_node_show(struct device *dev,
  190. struct device_attribute *attr, char *buf)
  191. {
  192. struct hv_device *hv_dev = device_to_hv_device(dev);
  193. if (!hv_dev->channel)
  194. return -ENODEV;
  195. return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
  196. }
  197. static DEVICE_ATTR_RO(numa_node);
  198. #endif
  199. static ssize_t server_monitor_pending_show(struct device *dev,
  200. struct device_attribute *dev_attr,
  201. char *buf)
  202. {
  203. struct hv_device *hv_dev = device_to_hv_device(dev);
  204. if (!hv_dev->channel)
  205. return -ENODEV;
  206. return sprintf(buf, "%d\n",
  207. channel_pending(hv_dev->channel,
  208. vmbus_connection.monitor_pages[0]));
  209. }
  210. static DEVICE_ATTR_RO(server_monitor_pending);
  211. static ssize_t client_monitor_pending_show(struct device *dev,
  212. struct device_attribute *dev_attr,
  213. char *buf)
  214. {
  215. struct hv_device *hv_dev = device_to_hv_device(dev);
  216. if (!hv_dev->channel)
  217. return -ENODEV;
  218. return sprintf(buf, "%d\n",
  219. channel_pending(hv_dev->channel,
  220. vmbus_connection.monitor_pages[1]));
  221. }
  222. static DEVICE_ATTR_RO(client_monitor_pending);
  223. static ssize_t server_monitor_latency_show(struct device *dev,
  224. struct device_attribute *dev_attr,
  225. char *buf)
  226. {
  227. struct hv_device *hv_dev = device_to_hv_device(dev);
  228. if (!hv_dev->channel)
  229. return -ENODEV;
  230. return sprintf(buf, "%d\n",
  231. channel_latency(hv_dev->channel,
  232. vmbus_connection.monitor_pages[0]));
  233. }
  234. static DEVICE_ATTR_RO(server_monitor_latency);
  235. static ssize_t client_monitor_latency_show(struct device *dev,
  236. struct device_attribute *dev_attr,
  237. char *buf)
  238. {
  239. struct hv_device *hv_dev = device_to_hv_device(dev);
  240. if (!hv_dev->channel)
  241. return -ENODEV;
  242. return sprintf(buf, "%d\n",
  243. channel_latency(hv_dev->channel,
  244. vmbus_connection.monitor_pages[1]));
  245. }
  246. static DEVICE_ATTR_RO(client_monitor_latency);
  247. static ssize_t server_monitor_conn_id_show(struct device *dev,
  248. struct device_attribute *dev_attr,
  249. char *buf)
  250. {
  251. struct hv_device *hv_dev = device_to_hv_device(dev);
  252. if (!hv_dev->channel)
  253. return -ENODEV;
  254. return sprintf(buf, "%d\n",
  255. channel_conn_id(hv_dev->channel,
  256. vmbus_connection.monitor_pages[0]));
  257. }
  258. static DEVICE_ATTR_RO(server_monitor_conn_id);
  259. static ssize_t client_monitor_conn_id_show(struct device *dev,
  260. struct device_attribute *dev_attr,
  261. char *buf)
  262. {
  263. struct hv_device *hv_dev = device_to_hv_device(dev);
  264. if (!hv_dev->channel)
  265. return -ENODEV;
  266. return sprintf(buf, "%d\n",
  267. channel_conn_id(hv_dev->channel,
  268. vmbus_connection.monitor_pages[1]));
  269. }
  270. static DEVICE_ATTR_RO(client_monitor_conn_id);
  271. static ssize_t out_intr_mask_show(struct device *dev,
  272. struct device_attribute *dev_attr, char *buf)
  273. {
  274. struct hv_device *hv_dev = device_to_hv_device(dev);
  275. struct hv_ring_buffer_debug_info outbound;
  276. int ret;
  277. if (!hv_dev->channel)
  278. return -ENODEV;
  279. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  280. &outbound);
  281. if (ret < 0)
  282. return ret;
  283. return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
  284. }
  285. static DEVICE_ATTR_RO(out_intr_mask);
  286. static ssize_t out_read_index_show(struct device *dev,
  287. struct device_attribute *dev_attr, char *buf)
  288. {
  289. struct hv_device *hv_dev = device_to_hv_device(dev);
  290. struct hv_ring_buffer_debug_info outbound;
  291. int ret;
  292. if (!hv_dev->channel)
  293. return -ENODEV;
  294. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  295. &outbound);
  296. if (ret < 0)
  297. return ret;
  298. return sprintf(buf, "%d\n", outbound.current_read_index);
  299. }
  300. static DEVICE_ATTR_RO(out_read_index);
  301. static ssize_t out_write_index_show(struct device *dev,
  302. struct device_attribute *dev_attr,
  303. char *buf)
  304. {
  305. struct hv_device *hv_dev = device_to_hv_device(dev);
  306. struct hv_ring_buffer_debug_info outbound;
  307. int ret;
  308. if (!hv_dev->channel)
  309. return -ENODEV;
  310. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  311. &outbound);
  312. if (ret < 0)
  313. return ret;
  314. return sprintf(buf, "%d\n", outbound.current_write_index);
  315. }
  316. static DEVICE_ATTR_RO(out_write_index);
  317. static ssize_t out_read_bytes_avail_show(struct device *dev,
  318. struct device_attribute *dev_attr,
  319. char *buf)
  320. {
  321. struct hv_device *hv_dev = device_to_hv_device(dev);
  322. struct hv_ring_buffer_debug_info outbound;
  323. int ret;
  324. if (!hv_dev->channel)
  325. return -ENODEV;
  326. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  327. &outbound);
  328. if (ret < 0)
  329. return ret;
  330. return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
  331. }
  332. static DEVICE_ATTR_RO(out_read_bytes_avail);
  333. static ssize_t out_write_bytes_avail_show(struct device *dev,
  334. struct device_attribute *dev_attr,
  335. char *buf)
  336. {
  337. struct hv_device *hv_dev = device_to_hv_device(dev);
  338. struct hv_ring_buffer_debug_info outbound;
  339. int ret;
  340. if (!hv_dev->channel)
  341. return -ENODEV;
  342. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
  343. &outbound);
  344. if (ret < 0)
  345. return ret;
  346. return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
  347. }
  348. static DEVICE_ATTR_RO(out_write_bytes_avail);
  349. static ssize_t in_intr_mask_show(struct device *dev,
  350. struct device_attribute *dev_attr, char *buf)
  351. {
  352. struct hv_device *hv_dev = device_to_hv_device(dev);
  353. struct hv_ring_buffer_debug_info inbound;
  354. int ret;
  355. if (!hv_dev->channel)
  356. return -ENODEV;
  357. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  358. if (ret < 0)
  359. return ret;
  360. return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
  361. }
  362. static DEVICE_ATTR_RO(in_intr_mask);
  363. static ssize_t in_read_index_show(struct device *dev,
  364. struct device_attribute *dev_attr, char *buf)
  365. {
  366. struct hv_device *hv_dev = device_to_hv_device(dev);
  367. struct hv_ring_buffer_debug_info inbound;
  368. int ret;
  369. if (!hv_dev->channel)
  370. return -ENODEV;
  371. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  372. if (ret < 0)
  373. return ret;
  374. return sprintf(buf, "%d\n", inbound.current_read_index);
  375. }
  376. static DEVICE_ATTR_RO(in_read_index);
  377. static ssize_t in_write_index_show(struct device *dev,
  378. struct device_attribute *dev_attr, char *buf)
  379. {
  380. struct hv_device *hv_dev = device_to_hv_device(dev);
  381. struct hv_ring_buffer_debug_info inbound;
  382. int ret;
  383. if (!hv_dev->channel)
  384. return -ENODEV;
  385. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  386. if (ret < 0)
  387. return ret;
  388. return sprintf(buf, "%d\n", inbound.current_write_index);
  389. }
  390. static DEVICE_ATTR_RO(in_write_index);
  391. static ssize_t in_read_bytes_avail_show(struct device *dev,
  392. struct device_attribute *dev_attr,
  393. char *buf)
  394. {
  395. struct hv_device *hv_dev = device_to_hv_device(dev);
  396. struct hv_ring_buffer_debug_info inbound;
  397. int ret;
  398. if (!hv_dev->channel)
  399. return -ENODEV;
  400. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  401. if (ret < 0)
  402. return ret;
  403. return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
  404. }
  405. static DEVICE_ATTR_RO(in_read_bytes_avail);
  406. static ssize_t in_write_bytes_avail_show(struct device *dev,
  407. struct device_attribute *dev_attr,
  408. char *buf)
  409. {
  410. struct hv_device *hv_dev = device_to_hv_device(dev);
  411. struct hv_ring_buffer_debug_info inbound;
  412. int ret;
  413. if (!hv_dev->channel)
  414. return -ENODEV;
  415. ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
  416. if (ret < 0)
  417. return ret;
  418. return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
  419. }
  420. static DEVICE_ATTR_RO(in_write_bytes_avail);
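/*
 * Print the "child_relid:target_cpu" mapping for the primary channel and
 * each of its sub-channels, one per line.
 */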
  421. static ssize_t channel_vp_mapping_show(struct device *dev,
  422. struct device_attribute *dev_attr,
  423. char *buf)
  424. {
  425. struct hv_device *hv_dev = device_to_hv_device(dev);
  426. struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
  427. int buf_size = PAGE_SIZE, n_written, tot_written;
  428. struct list_head *cur;
  429. if (!channel)
  430. return -ENODEV;
  431. mutex_lock(&vmbus_connection.channel_mutex);
  432. tot_written = snprintf(buf, buf_size, "%u:%u\n",
  433. channel->offermsg.child_relid, channel->target_cpu);
  434. list_for_each(cur, &channel->sc_list) {
  435. if (tot_written >= buf_size - 1)
  436. break;
  437. cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
  438. n_written = scnprintf(buf + tot_written,
  439. buf_size - tot_written,
  440. "%u:%u\n",
  441. cur_sc->offermsg.child_relid,
  442. cur_sc->target_cpu);
  443. tot_written += n_written;
  444. }
  445. mutex_unlock(&vmbus_connection.channel_mutex);
  446. return tot_written;
  447. }
  448. static DEVICE_ATTR_RO(channel_vp_mapping);
  449. static ssize_t vendor_show(struct device *dev,
  450. struct device_attribute *dev_attr,
  451. char *buf)
  452. {
  453. struct hv_device *hv_dev = device_to_hv_device(dev);
  454. return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
  455. }
  456. static DEVICE_ATTR_RO(vendor);
  457. static ssize_t device_show(struct device *dev,
  458. struct device_attribute *dev_attr,
  459. char *buf)
  460. {
  461. struct hv_device *hv_dev = device_to_hv_device(dev);
  462. return sprintf(buf, "0x%x\n", hv_dev->device_id);
  463. }
  464. static DEVICE_ATTR_RO(device);
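/*
 * Writing a driver name restricts this device to binding only to that
 * driver; writing an empty string (or a lone newline) clears the override.
 */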
  465. static ssize_t driver_override_store(struct device *dev,
  466. struct device_attribute *attr,
  467. const char *buf, size_t count)
  468. {
  469. struct hv_device *hv_dev = device_to_hv_device(dev);
  470. char *driver_override, *old, *cp;
  471. /* We need to keep extra room for a newline */
  472. if (count >= (PAGE_SIZE - 1))
  473. return -EINVAL;
  474. driver_override = kstrndup(buf, count, GFP_KERNEL);
  475. if (!driver_override)
  476. return -ENOMEM;
  477. cp = strchr(driver_override, '\n');
  478. if (cp)
  479. *cp = '\0';
  480. device_lock(dev);
  481. old = hv_dev->driver_override;
  482. if (strlen(driver_override)) {
  483. hv_dev->driver_override = driver_override;
  484. } else {
  485. kfree(driver_override);
  486. hv_dev->driver_override = NULL;
  487. }
  488. device_unlock(dev);
  489. kfree(old);
  490. return count;
  491. }
  492. static ssize_t driver_override_show(struct device *dev,
  493. struct device_attribute *attr, char *buf)
  494. {
  495. struct hv_device *hv_dev = device_to_hv_device(dev);
  496. ssize_t len;
  497. device_lock(dev);
  498. len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
  499. device_unlock(dev);
  500. return len;
  501. }
  502. static DEVICE_ATTR_RW(driver_override);
  503. /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
  504. static struct attribute *vmbus_dev_attrs[] = {
  505. &dev_attr_id.attr,
  506. &dev_attr_state.attr,
  507. &dev_attr_monitor_id.attr,
  508. &dev_attr_class_id.attr,
  509. &dev_attr_device_id.attr,
  510. &dev_attr_modalias.attr,
  511. #ifdef CONFIG_NUMA
  512. &dev_attr_numa_node.attr,
  513. #endif
  514. &dev_attr_server_monitor_pending.attr,
  515. &dev_attr_client_monitor_pending.attr,
  516. &dev_attr_server_monitor_latency.attr,
  517. &dev_attr_client_monitor_latency.attr,
  518. &dev_attr_server_monitor_conn_id.attr,
  519. &dev_attr_client_monitor_conn_id.attr,
  520. &dev_attr_out_intr_mask.attr,
  521. &dev_attr_out_read_index.attr,
  522. &dev_attr_out_write_index.attr,
  523. &dev_attr_out_read_bytes_avail.attr,
  524. &dev_attr_out_write_bytes_avail.attr,
  525. &dev_attr_in_intr_mask.attr,
  526. &dev_attr_in_read_index.attr,
  527. &dev_attr_in_write_index.attr,
  528. &dev_attr_in_read_bytes_avail.attr,
  529. &dev_attr_in_write_bytes_avail.attr,
  530. &dev_attr_channel_vp_mapping.attr,
  531. &dev_attr_vendor.attr,
  532. &dev_attr_device.attr,
  533. &dev_attr_driver_override.attr,
  534. NULL,
  535. };
  536. /*
  537. * Device-level attribute_group callback function. Returns the permission for
  538. * each attribute, and returns 0 if an attribute is not visible.
  539. */
  540. static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
  541. struct attribute *attr, int idx)
  542. {
  543. struct device *dev = kobj_to_dev(kobj);
  544. const struct hv_device *hv_dev = device_to_hv_device(dev);
  545. /* Hide the monitor attributes if the monitor mechanism is not used. */
  546. if (!hv_dev->channel->offermsg.monitor_allocated &&
  547. (attr == &dev_attr_monitor_id.attr ||
  548. attr == &dev_attr_server_monitor_pending.attr ||
  549. attr == &dev_attr_client_monitor_pending.attr ||
  550. attr == &dev_attr_server_monitor_latency.attr ||
  551. attr == &dev_attr_client_monitor_latency.attr ||
  552. attr == &dev_attr_server_monitor_conn_id.attr ||
  553. attr == &dev_attr_client_monitor_conn_id.attr))
  554. return 0;
  555. return attr->mode;
  556. }
  557. static const struct attribute_group vmbus_dev_group = {
  558. .attrs = vmbus_dev_attrs,
  559. .is_visible = vmbus_dev_attr_is_visible
  560. };
  561. __ATTRIBUTE_GROUPS(vmbus_dev);
  562. /*
  563. * vmbus_uevent - add uevent for our device
  564. *
  565. * This routine is invoked when a device is added or removed on the vmbus to
566. * generate a uevent to udev in userspace. Udev will then look at its
567. * rules and the uevent generated here to load the appropriate driver.
  568. *
  569. * The alias string will be of the form vmbus:guid where guid is the string
  570. * representation of the device guid (each byte of the guid will be
571. * represented with two hex characters).
  572. */
  573. static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
  574. {
  575. struct hv_device *dev = device_to_hv_device(device);
  576. const char *format = "MODALIAS=vmbus:%*phN";
  577. return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
  578. }
  579. static const struct hv_vmbus_device_id *
  580. hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
  581. {
  582. if (id == NULL)
  583. return NULL; /* empty device table */
  584. for (; !guid_is_null(&id->guid); id++)
  585. if (guid_equal(&id->guid, guid))
  586. return id;
  587. return NULL;
  588. }
  589. static const struct hv_vmbus_device_id *
  590. hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
  591. {
  592. const struct hv_vmbus_device_id *id = NULL;
  593. struct vmbus_dynid *dynid;
  594. spin_lock(&drv->dynids.lock);
  595. list_for_each_entry(dynid, &drv->dynids.list, node) {
  596. if (guid_equal(&dynid->id.guid, guid)) {
  597. id = &dynid->id;
  598. break;
  599. }
  600. }
  601. spin_unlock(&drv->dynids.lock);
  602. return id;
  603. }
  604. static const struct hv_vmbus_device_id vmbus_device_null;
  605. /*
  606. * Return a matching hv_vmbus_device_id pointer.
  607. * If there is no match, return NULL.
  608. */
  609. static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
  610. struct hv_device *dev)
  611. {
  612. const guid_t *guid = &dev->dev_type;
  613. const struct hv_vmbus_device_id *id;
  614. /* When driver_override is set, only bind to the matching driver */
  615. if (dev->driver_override && strcmp(dev->driver_override, drv->name))
  616. return NULL;
  617. /* Look at the dynamic ids first, before the static ones */
  618. id = hv_vmbus_dynid_match(drv, guid);
  619. if (!id)
  620. id = hv_vmbus_dev_match(drv->id_table, guid);
  621. /* driver_override will always match, send a dummy id */
  622. if (!id && dev->driver_override)
  623. id = &vmbus_device_null;
  624. return id;
  625. }
  626. /* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
  627. static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
  628. {
  629. struct vmbus_dynid *dynid;
  630. dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
  631. if (!dynid)
  632. return -ENOMEM;
  633. dynid->id.guid = *guid;
  634. spin_lock(&drv->dynids.lock);
  635. list_add_tail(&dynid->node, &drv->dynids.list);
  636. spin_unlock(&drv->dynids.lock);
  637. return driver_attach(&drv->driver);
  638. }
  639. static void vmbus_free_dynids(struct hv_driver *drv)
  640. {
  641. struct vmbus_dynid *dynid, *n;
  642. spin_lock(&drv->dynids.lock);
  643. list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
  644. list_del(&dynid->node);
  645. kfree(dynid);
  646. }
  647. spin_unlock(&drv->dynids.lock);
  648. }
  649. /*
  650. * store_new_id - sysfs frontend to vmbus_add_dynid()
  651. *
  652. * Allow GUIDs to be added to an existing driver via sysfs.
  653. */
  654. static ssize_t new_id_store(struct device_driver *driver, const char *buf,
  655. size_t count)
  656. {
  657. struct hv_driver *drv = drv_to_hv_drv(driver);
  658. guid_t guid;
  659. ssize_t retval;
  660. retval = guid_parse(buf, &guid);
  661. if (retval)
  662. return retval;
  663. if (hv_vmbus_dynid_match(drv, &guid))
  664. return -EEXIST;
  665. retval = vmbus_add_dynid(drv, &guid);
  666. if (retval)
  667. return retval;
  668. return count;
  669. }
  670. static DRIVER_ATTR_WO(new_id);
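/*
 * Illustrative usage (the driver name and GUID below are examples, not
 * taken from this file):
 *   echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > \
 *     /sys/bus/vmbus/drivers/hv_netvsc/new_id
 */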
  671. /*
672. * store_remove_id - remove a device ID from this driver
673. *
674. * Removes a dynamic VMBus device ID from this driver.
  675. */
  676. static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
  677. size_t count)
  678. {
  679. struct hv_driver *drv = drv_to_hv_drv(driver);
  680. struct vmbus_dynid *dynid, *n;
  681. guid_t guid;
  682. ssize_t retval;
  683. retval = guid_parse(buf, &guid);
  684. if (retval)
  685. return retval;
  686. retval = -ENODEV;
  687. spin_lock(&drv->dynids.lock);
  688. list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
  689. struct hv_vmbus_device_id *id = &dynid->id;
  690. if (guid_equal(&id->guid, &guid)) {
  691. list_del(&dynid->node);
  692. kfree(dynid);
  693. retval = count;
  694. break;
  695. }
  696. }
  697. spin_unlock(&drv->dynids.lock);
  698. return retval;
  699. }
  700. static DRIVER_ATTR_WO(remove_id);
  701. static struct attribute *vmbus_drv_attrs[] = {
  702. &driver_attr_new_id.attr,
  703. &driver_attr_remove_id.attr,
  704. NULL,
  705. };
  706. ATTRIBUTE_GROUPS(vmbus_drv);
  707. /*
  708. * vmbus_match - Attempt to match the specified device to the specified driver
  709. */
  710. static int vmbus_match(struct device *device, struct device_driver *driver)
  711. {
  712. struct hv_driver *drv = drv_to_hv_drv(driver);
  713. struct hv_device *hv_dev = device_to_hv_device(device);
  714. /* The hv_sock driver handles all hv_sock offers. */
  715. if (is_hvsock_channel(hv_dev->channel))
  716. return drv->hvsock;
  717. if (hv_vmbus_get_id(drv, hv_dev))
  718. return 1;
  719. return 0;
  720. }
  721. /*
722. * vmbus_probe - Add the new vmbus child device
  723. */
  724. static int vmbus_probe(struct device *child_device)
  725. {
  726. int ret = 0;
  727. struct hv_driver *drv =
  728. drv_to_hv_drv(child_device->driver);
  729. struct hv_device *dev = device_to_hv_device(child_device);
  730. const struct hv_vmbus_device_id *dev_id;
  731. dev_id = hv_vmbus_get_id(drv, dev);
  732. if (drv->probe) {
  733. ret = drv->probe(dev, dev_id);
  734. if (ret != 0)
  735. pr_err("probe failed for device %s (%d)\n",
  736. dev_name(child_device), ret);
  737. } else {
  738. pr_err("probe not set for driver %s\n",
  739. dev_name(child_device));
  740. ret = -ENODEV;
  741. }
  742. return ret;
  743. }
  744. /*
  745. * vmbus_remove - Remove a vmbus device
  746. */
  747. static int vmbus_remove(struct device *child_device)
  748. {
  749. struct hv_driver *drv;
  750. struct hv_device *dev = device_to_hv_device(child_device);
  751. if (child_device->driver) {
  752. drv = drv_to_hv_drv(child_device->driver);
  753. if (drv->remove)
  754. drv->remove(dev);
  755. }
  756. return 0;
  757. }
  758. /*
  759. * vmbus_shutdown - Shutdown a vmbus device
  760. */
  761. static void vmbus_shutdown(struct device *child_device)
  762. {
  763. struct hv_driver *drv;
  764. struct hv_device *dev = device_to_hv_device(child_device);
  765. /* The device may not be attached yet */
  766. if (!child_device->driver)
  767. return;
  768. drv = drv_to_hv_drv(child_device->driver);
  769. if (drv->shutdown)
  770. drv->shutdown(dev);
  771. }
  772. #ifdef CONFIG_PM_SLEEP
  773. /*
  774. * vmbus_suspend - Suspend a vmbus device
  775. */
  776. static int vmbus_suspend(struct device *child_device)
  777. {
  778. struct hv_driver *drv;
  779. struct hv_device *dev = device_to_hv_device(child_device);
  780. /* The device may not be attached yet */
  781. if (!child_device->driver)
  782. return 0;
  783. drv = drv_to_hv_drv(child_device->driver);
  784. if (!drv->suspend)
  785. return -EOPNOTSUPP;
  786. return drv->suspend(dev);
  787. }
  788. /*
  789. * vmbus_resume - Resume a vmbus device
  790. */
  791. static int vmbus_resume(struct device *child_device)
  792. {
  793. struct hv_driver *drv;
  794. struct hv_device *dev = device_to_hv_device(child_device);
  795. /* The device may not be attached yet */
  796. if (!child_device->driver)
  797. return 0;
  798. drv = drv_to_hv_drv(child_device->driver);
  799. if (!drv->resume)
  800. return -EOPNOTSUPP;
  801. return drv->resume(dev);
  802. }
  803. #else
  804. #define vmbus_suspend NULL
  805. #define vmbus_resume NULL
  806. #endif /* CONFIG_PM_SLEEP */
  807. /*
  808. * vmbus_device_release - Final callback release of the vmbus child device
  809. */
  810. static void vmbus_device_release(struct device *device)
  811. {
  812. struct hv_device *hv_dev = device_to_hv_device(device);
  813. struct vmbus_channel *channel = hv_dev->channel;
  814. hv_debug_rm_dev_dir(hv_dev);
  815. mutex_lock(&vmbus_connection.channel_mutex);
  816. hv_process_channel_removal(channel);
  817. mutex_unlock(&vmbus_connection.channel_mutex);
  818. kfree(hv_dev);
  819. }
  820. /*
  821. * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
  822. *
  823. * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
  824. * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
  825. * is no way to wake up a Generation-2 VM.
  826. *
  827. * The other 4 ops are for hibernation.
  828. */
  829. static const struct dev_pm_ops vmbus_pm = {
  830. .suspend_noirq = NULL,
  831. .resume_noirq = NULL,
  832. .freeze_noirq = vmbus_suspend,
  833. .thaw_noirq = vmbus_resume,
  834. .poweroff_noirq = vmbus_suspend,
  835. .restore_noirq = vmbus_resume,
  836. };
  837. /* The one and only one */
  838. static struct bus_type hv_bus = {
  839. .name = "vmbus",
  840. .match = vmbus_match,
  841. .shutdown = vmbus_shutdown,
  842. .remove = vmbus_remove,
  843. .probe = vmbus_probe,
  844. .uevent = vmbus_uevent,
  845. .dev_groups = vmbus_dev_groups,
  846. .drv_groups = vmbus_drv_groups,
  847. .pm = &vmbus_pm,
  848. };
  849. struct onmessage_work_context {
  850. struct work_struct work;
  851. struct {
  852. struct hv_message_header header;
  853. u8 payload[];
  854. } msg;
  855. };
  856. static void vmbus_onmessage_work(struct work_struct *work)
  857. {
  858. struct onmessage_work_context *ctx;
  859. /* Do not process messages if we're in DISCONNECTED state */
  860. if (vmbus_connection.conn_state == DISCONNECTED)
  861. return;
  862. ctx = container_of(work, struct onmessage_work_context,
  863. work);
  864. vmbus_onmessage((struct vmbus_channel_message_header *)
  865. &ctx->msg.payload);
  866. kfree(ctx);
  867. }
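/*
 * Tasklet handler: dispatch the channel protocol message found in this
 * CPU's SynIC message page slot. Blocking handlers are deferred to a work
 * queue; non-blocking handlers run directly in tasklet context.
 */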
  868. void vmbus_on_msg_dpc(unsigned long data)
  869. {
  870. struct hv_per_cpu_context *hv_cpu = (void *)data;
  871. void *page_addr = hv_cpu->synic_message_page;
  872. struct hv_message *msg = (struct hv_message *)page_addr +
  873. VMBUS_MESSAGE_SINT;
  874. struct vmbus_channel_message_header *hdr;
  875. const struct vmbus_channel_message_table_entry *entry;
  876. struct onmessage_work_context *ctx;
  877. u32 message_type = msg->header.message_type;
  878. /*
  879. * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
  880. * it is being used in 'struct vmbus_channel_message_header' definition
  881. * which is supposed to match hypervisor ABI.
  882. */
  883. BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));
  884. if (message_type == HVMSG_NONE)
  885. /* no msg */
  886. return;
  887. hdr = (struct vmbus_channel_message_header *)msg->u.payload;
  888. trace_vmbus_on_msg_dpc(hdr);
  889. if (hdr->msgtype >= CHANNELMSG_COUNT) {
  890. WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
  891. goto msg_handled;
  892. }
  893. if (msg->header.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
  894. WARN_ONCE(1, "payload size is too large (%d)\n",
  895. msg->header.payload_size);
  896. goto msg_handled;
  897. }
  898. entry = &channel_message_table[hdr->msgtype];
  899. if (!entry->message_handler)
  900. goto msg_handled;
  901. if (msg->header.payload_size < entry->min_payload_len) {
  902. WARN_ONCE(1, "message too short: msgtype=%d len=%d\n",
  903. hdr->msgtype, msg->header.payload_size);
  904. goto msg_handled;
  905. }
  906. if (entry->handler_type == VMHT_BLOCKING) {
  907. ctx = kmalloc(sizeof(*ctx) + msg->header.payload_size,
  908. GFP_ATOMIC);
  909. if (ctx == NULL)
  910. return;
  911. INIT_WORK(&ctx->work, vmbus_onmessage_work);
  912. memcpy(&ctx->msg, msg, sizeof(msg->header) +
  913. msg->header.payload_size);
  914. /*
  915. * The host can generate a rescind message while we
  916. * may still be handling the original offer. We deal with
  917. * this condition by relying on the synchronization provided
  918. * by offer_in_progress and by channel_mutex. See also the
  919. * inline comments in vmbus_onoffer_rescind().
  920. */
  921. switch (hdr->msgtype) {
  922. case CHANNELMSG_RESCIND_CHANNELOFFER:
  923. /*
924. * If we are handling the rescind message,
  925. * schedule the work on the global work queue.
  926. *
  927. * The OFFER message and the RESCIND message should
  928. * not be handled by the same serialized work queue,
  929. * because the OFFER handler may call vmbus_open(),
  930. * which tries to open the channel by sending an
  931. * OPEN_CHANNEL message to the host and waits for
  932. * the host's response; however, if the host has
  933. * rescinded the channel before it receives the
  934. * OPEN_CHANNEL message, the host just silently
  935. * ignores the OPEN_CHANNEL message; as a result,
936. * the guest's OFFER handler hangs forever if we
  937. * handle the RESCIND message in the same serialized
  938. * work queue: the RESCIND handler can not start to
  939. * run before the OFFER handler finishes.
  940. */
  941. schedule_work(&ctx->work);
  942. break;
  943. case CHANNELMSG_OFFERCHANNEL:
  944. /*
  945. * The host sends the offer message of a given channel
  946. * before sending the rescind message of the same
  947. * channel. These messages are sent to the guest's
  948. * connect CPU; the guest then starts processing them
  949. * in the tasklet handler on this CPU:
  950. *
  951. * VMBUS_CONNECT_CPU
  952. *
  953. * [vmbus_on_msg_dpc()]
  954. * atomic_inc() // CHANNELMSG_OFFERCHANNEL
  955. * queue_work()
  956. * ...
  957. * [vmbus_on_msg_dpc()]
  958. * schedule_work() // CHANNELMSG_RESCIND_CHANNELOFFER
  959. *
  960. * We rely on the memory-ordering properties of the
  961. * queue_work() and schedule_work() primitives, which
  962. * guarantee that the atomic increment will be visible
  963. * to the CPUs which will execute the offer & rescind
  964. * works by the time these works will start execution.
  965. */
  966. atomic_inc(&vmbus_connection.offer_in_progress);
  967. fallthrough;
  968. default:
  969. queue_work(vmbus_connection.work_queue, &ctx->work);
  970. }
  971. } else
  972. entry->message_handler(hdr);
  973. msg_handled:
  974. vmbus_signal_eom(msg, message_type);
  975. }
  976. #ifdef CONFIG_PM_SLEEP
  977. /*
  978. * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
979. * hibernation, because hv_sock connections cannot persist across hibernation.
  980. */
  981. static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
  982. {
  983. struct onmessage_work_context *ctx;
  984. struct vmbus_channel_rescind_offer *rescind;
  985. WARN_ON(!is_hvsock_channel(channel));
  986. /*
  987. * Allocation size is small and the allocation should really not fail,
  988. * otherwise the state of the hv_sock connections ends up in limbo.
  989. */
  990. ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
  991. GFP_KERNEL | __GFP_NOFAIL);
  992. /*
993. * So far, these are not really used by Linux. Just set them to
994. * reasonable values conforming to the definitions of the fields.
  995. */
  996. ctx->msg.header.message_type = 1;
  997. ctx->msg.header.payload_size = sizeof(*rescind);
  998. /* These values are actually used by Linux. */
  999. rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
  1000. rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
  1001. rescind->child_relid = channel->offermsg.child_relid;
  1002. INIT_WORK(&ctx->work, vmbus_onmessage_work);
  1003. queue_work(vmbus_connection.work_queue, &ctx->work);
  1004. }
  1005. #endif /* CONFIG_PM_SLEEP */
  1006. /*
  1007. * Schedule all channels with events pending
  1008. */
  1009. static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
  1010. {
  1011. unsigned long *recv_int_page;
  1012. u32 maxbits, relid;
  1013. if (vmbus_proto_version < VERSION_WIN8) {
  1014. maxbits = MAX_NUM_CHANNELS_SUPPORTED;
  1015. recv_int_page = vmbus_connection.recv_int_page;
  1016. } else {
  1017. /*
  1018. * When the host is win8 and beyond, the event page
  1019. * can be directly checked to get the id of the channel
  1020. * that has the interrupt pending.
  1021. */
  1022. void *page_addr = hv_cpu->synic_event_page;
  1023. union hv_synic_event_flags *event
  1024. = (union hv_synic_event_flags *)page_addr +
  1025. VMBUS_MESSAGE_SINT;
  1026. maxbits = HV_EVENT_FLAGS_COUNT;
  1027. recv_int_page = event->flags;
  1028. }
  1029. if (unlikely(!recv_int_page))
  1030. return;
  1031. for_each_set_bit(relid, recv_int_page, maxbits) {
  1032. void (*callback_fn)(void *context);
  1033. struct vmbus_channel *channel;
  1034. if (!sync_test_and_clear_bit(relid, recv_int_page))
  1035. continue;
  1036. /* Special case - vmbus channel protocol msg */
  1037. if (relid == 0)
  1038. continue;
  1039. /*
  1040. * Pairs with the kfree_rcu() in vmbus_chan_release().
  1041. * Guarantees that the channel data structure doesn't
  1042. * get freed while the channel pointer below is being
  1043. * dereferenced.
  1044. */
  1045. rcu_read_lock();
  1046. /* Find channel based on relid */
  1047. channel = relid2channel(relid);
  1048. if (channel == NULL)
  1049. goto sched_unlock_rcu;
  1050. if (channel->rescind)
  1051. goto sched_unlock_rcu;
  1052. /*
  1053. * Make sure that the ring buffer data structure doesn't get
  1054. * freed while we dereference the ring buffer pointer. Test
  1055. * for the channel's onchannel_callback being NULL within a
  1056. * sched_lock critical section. See also the inline comments
  1057. * in vmbus_reset_channel_cb().
  1058. */
  1059. spin_lock(&channel->sched_lock);
  1060. callback_fn = channel->onchannel_callback;
  1061. if (unlikely(callback_fn == NULL))
  1062. goto sched_unlock;
  1063. trace_vmbus_chan_sched(channel);
  1064. ++channel->interrupts;
  1065. switch (channel->callback_mode) {
  1066. case HV_CALL_ISR:
  1067. (*callback_fn)(channel->channel_callback_context);
  1068. break;
  1069. case HV_CALL_BATCHED:
  1070. hv_begin_read(&channel->inbound);
  1071. fallthrough;
  1072. case HV_CALL_DIRECT:
  1073. tasklet_schedule(&channel->callback_event);
  1074. }
  1075. sched_unlock:
  1076. spin_unlock(&channel->sched_lock);
  1077. sched_unlock_rcu:
  1078. rcu_read_unlock();
  1079. }
  1080. }
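/*
 * Top-level VMBus interrupt handler: check the SynIC event page for channel
 * interrupts, then the message page for protocol messages, scheduling the
 * per-cpu message DPC when one is pending.
 */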
  1081. static void vmbus_isr(void)
  1082. {
  1083. struct hv_per_cpu_context *hv_cpu
  1084. = this_cpu_ptr(hv_context.cpu_context);
  1085. void *page_addr = hv_cpu->synic_event_page;
  1086. struct hv_message *msg;
  1087. union hv_synic_event_flags *event;
  1088. bool handled = false;
  1089. if (unlikely(page_addr == NULL))
  1090. return;
  1091. event = (union hv_synic_event_flags *)page_addr +
  1092. VMBUS_MESSAGE_SINT;
  1093. /*
  1094. * Check for events before checking for messages. This is the order
  1095. * in which events and messages are checked in Windows guests on
  1096. * Hyper-V, and the Windows team suggested we do the same.
  1097. */
  1098. if ((vmbus_proto_version == VERSION_WS2008) ||
  1099. (vmbus_proto_version == VERSION_WIN7)) {
  1100. /* Since we are a child, we only need to check bit 0 */
  1101. if (sync_test_and_clear_bit(0, event->flags))
  1102. handled = true;
  1103. } else {
  1104. /*
  1105. * Our host is win8 or above. The signaling mechanism
  1106. * has changed and we can directly look at the event page.
1107. * If bit n is set then we have an interrupt on the channel
  1108. * whose id is n.
  1109. */
  1110. handled = true;
  1111. }
  1112. if (handled)
  1113. vmbus_chan_sched(hv_cpu);
  1114. page_addr = hv_cpu->synic_message_page;
  1115. msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
  1116. /* Check if there are actual msgs to be processed */
  1117. if (msg->header.message_type != HVMSG_NONE) {
  1118. if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
  1119. hv_stimer0_isr();
  1120. vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
  1121. } else
  1122. tasklet_schedule(&hv_cpu->msg_dpc);
  1123. }
  1124. add_interrupt_randomness(hv_get_vector(), 0);
  1125. }
  1126. /*
  1127. * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
  1128. * buffer and call into Hyper-V to transfer the data.
  1129. */
  1130. static void hv_kmsg_dump(struct kmsg_dumper *dumper,
  1131. enum kmsg_dump_reason reason)
  1132. {
  1133. size_t bytes_written;
  1134. phys_addr_t panic_pa;
  1135. /* We are only interested in panics. */
  1136. if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
  1137. return;
  1138. panic_pa = virt_to_phys(hv_panic_page);
  1139. /*
  1140. * Write dump contents to the page. No need to synchronize; panic should
  1141. * be single-threaded.
  1142. */
  1143. kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
  1144. &bytes_written);
  1145. if (bytes_written)
  1146. hyperv_report_panic_msg(panic_pa, bytes_written);
  1147. }
  1148. static struct kmsg_dumper hv_kmsg_dumper = {
  1149. .dump = hv_kmsg_dump,
  1150. };
  1151. static struct ctl_table_header *hv_ctl_table_hdr;
  1152. /*
  1153. * sysctl option to allow the user to control whether kmsg data should be
  1154. * reported to Hyper-V on panic.
  1155. */
  1156. static struct ctl_table hv_ctl_table[] = {
  1157. {
  1158. .procname = "hyperv_record_panic_msg",
  1159. .data = &sysctl_record_panic_msg,
  1160. .maxlen = sizeof(int),
  1161. .mode = 0644,
  1162. .proc_handler = proc_dointvec_minmax,
  1163. .extra1 = SYSCTL_ZERO,
  1164. .extra2 = SYSCTL_ONE
  1165. },
  1166. {}
  1167. };
  1168. static struct ctl_table hv_root_table[] = {
  1169. {
  1170. .procname = "kernel",
  1171. .mode = 0555,
  1172. .child = hv_ctl_table
  1173. },
  1174. {}
  1175. };
  1176. /*
1177. * vmbus_bus_init - Main vmbus driver initialization routine.
  1178. *
  1179. * Here, we
  1180. * - initialize the vmbus driver context
  1181. * - invoke the vmbus hv main init routine
  1182. * - retrieve the channel offers
  1183. */
  1184. static int vmbus_bus_init(void)
  1185. {
  1186. int ret;
  1187. ret = hv_init();
  1188. if (ret != 0) {
  1189. pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
  1190. return ret;
  1191. }
  1192. ret = bus_register(&hv_bus);
  1193. if (ret)
  1194. return ret;
  1195. ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
  1196. if (ret)
  1197. goto err_setup;
  1198. ret = hv_synic_alloc();
  1199. if (ret)
  1200. goto err_alloc;
  1201. /*
  1202. * Initialize the per-cpu interrupt state and stimer state.
  1203. * Then connect to the host.
  1204. */
  1205. ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
  1206. hv_synic_init, hv_synic_cleanup);
  1207. if (ret < 0)
  1208. goto err_cpuhp;
  1209. hyperv_cpuhp_online = ret;
  1210. ret = vmbus_connect();
  1211. if (ret)
  1212. goto err_connect;
  1213. /*
  1214. * Only register if the crash MSRs are available
  1215. */
  1216. if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
  1217. u64 hyperv_crash_ctl;
  1218. /*
  1219. * Sysctl registration is not fatal, since by default
  1220. * reporting is enabled.
  1221. */
  1222. hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
  1223. if (!hv_ctl_table_hdr)
  1224. pr_err("Hyper-V: sysctl table register error");
  1225. /*
  1226. * Register for panic kmsg callback only if the right
  1227. * capability is supported by the hypervisor.
  1228. */
  1229. hv_get_crash_ctl(hyperv_crash_ctl);
  1230. if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG) {
  1231. hv_panic_page = (void *)hv_alloc_hyperv_zeroed_page();
  1232. if (hv_panic_page) {
  1233. ret = kmsg_dump_register(&hv_kmsg_dumper);
  1234. if (ret) {
  1235. pr_err("Hyper-V: kmsg dump register "
  1236. "error 0x%x\n", ret);
  1237. hv_free_hyperv_page(
  1238. (unsigned long)hv_panic_page);
  1239. hv_panic_page = NULL;
  1240. }
  1241. } else
  1242. pr_err("Hyper-V: panic message page memory "
  1243. "allocation failed");
  1244. }
  1245. register_die_notifier(&hyperv_die_block);
  1246. }
  1247. /*
  1248. * Always register the panic notifier because we need to unload
  1249. * the VMbus channel connection to prevent any VMbus
  1250. * activity after the VM panics.
  1251. */
  1252. atomic_notifier_chain_register(&panic_notifier_list,
  1253. &hyperv_panic_block);
  1254. vmbus_request_offers();
  1255. return 0;
  1256. err_connect:
  1257. cpuhp_remove_state(hyperv_cpuhp_online);
  1258. err_cpuhp:
  1259. hv_synic_free();
  1260. err_alloc:
  1261. hv_remove_vmbus_irq();
  1262. err_setup:
  1263. bus_unregister(&hv_bus);
  1264. unregister_sysctl_table(hv_ctl_table_hdr);
  1265. hv_ctl_table_hdr = NULL;
  1266. return ret;
  1267. }
  1268. /**
1269. * __vmbus_driver_register() - Register a vmbus driver
1270. * @hv_driver: Pointer to driver structure you want to register
1271. * @owner: owner module of the driver
  1272. * @mod_name: module name string
  1273. *
  1274. * Registers the given driver with Linux through the 'driver_register()' call
  1275. * and sets up the hyper-v vmbus handling for this driver.
  1276. * It will return the state of the 'driver_register()' call.
  1277. *
  1278. */
  1279. int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
  1280. {
  1281. int ret;
  1282. pr_info("registering driver %s\n", hv_driver->name);
  1283. ret = vmbus_exists();
  1284. if (ret < 0)
  1285. return ret;
  1286. hv_driver->driver.name = hv_driver->name;
  1287. hv_driver->driver.owner = owner;
  1288. hv_driver->driver.mod_name = mod_name;
  1289. hv_driver->driver.bus = &hv_bus;
  1290. spin_lock_init(&hv_driver->dynids.lock);
  1291. INIT_LIST_HEAD(&hv_driver->dynids.list);
  1292. ret = driver_register(&hv_driver->driver);
  1293. return ret;
  1294. }
  1295. EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);

/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)

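/*
 * Generic sysfs_ops dispatchers for the per-channel attributes: resolve the
 * containing vmbus_chan_attribute and vmbus_channel from the kobject and
 * forward to the attribute's show()/store() callback, if one is set.
 */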
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};

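/*
 * Ring-buffer attribute show helpers.  Each one takes ring_buffer_mutex so
 * that it does not race with channel close/re-open, and returns -EINVAL if
 * the ring buffer is not currently mapped.
 */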
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);

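/*
 * The "cpu" attribute reports the channel's current target CPU and, on
 * hosts that support the MODIFYCHANNEL message (VERSION_WIN10_V4_1 and
 * later), allows it to be changed at runtime from user space.
 */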
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%uu", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note.  The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel->offermsg.child_relid,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * Warning.  At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags.  It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);

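/*
 * Read-only attributes exporting the channel's monitor parameters and the
 * interrupt/ring-buffer statistics counters maintained by the VMbus core.
 */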
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, S_IRUGO, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);

static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};

/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};

/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret) {
		kobject_put(kobj);
		return ret;
	}

	ret = sysfs_create_group(kobj, &vmbus_chan_group);
	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		kobject_put(kobj);
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}

/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}

/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}

/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}

/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}

/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000

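/*
 * ACPI _CRS walk callback: collect the VMbus MMIO bus windows into the
 * hyperv_mmio resource list (merging adjacent ranges and truncating around
 * the virtual TPM region), and record the VMbus interrupt from the extended
 * IRQ descriptor on ARM64.
 */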
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;
	struct resource r;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	/*
	 * The IRQ information is needed only on ARM64, which Hyper-V
	 * sets up in the extended format.  IRQ information is present
	 * on x86/x64 in the non-extended format but it is not used by
	 * Linux. So don't bother checking for the non-extended format.
	 */
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
			return AE_ERROR;
		}
		/* ARM64 INTID for VMbus */
		vmbus_interrupt = res->data.extended_irq.interrupts[0];
		/* Linux IRQ number */
		vmbus_irq = r.start;
		return AE_OK;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}

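/*
 * Undo vmbus_acpi_add(): release the frame buffer reservation (if any) and
 * free every node on the hyperv_mmio resource list.
 */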
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}

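/*
 * Reserve the MMIO range used by the frame buffer so that later
 * vmbus_allocate_mmio() callers do not hand it out to other devices unless
 * they explicitly allow overlapping it.
 */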
static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB.  The length seems to
	 * be underreported, particularly in a Generation 1 VM.  So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */
	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}

/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplied a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			allocation
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case.  It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * that _CRS.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region.  Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);

/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);

static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}

#ifdef CONFIG_PM_SLEEP
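/*
 * Hibernation support: quiesce the VMbus connection on suspend.  Wait for
 * in-progress offers and for sub-channel/hv_sock cleanup, force hv_sock
 * channels to be rescinded, and unmap every remaining primary channel from
 * its relid so that vmbus_onoffer() can fix it up again on resume.
 */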
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path. hv_sock channels should also be destroyed, but
	 * a hv_sock channel of an established hv_sock connection can not be
	 * really destroyed since it may still be referenced by the userspace
	 * application, so we just force the hv_sock channel to be rescinded
	 * by vmbus_force_channel_rescinded(), and the userspace application
	 * will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
		pr_err("Can not suspend due to a previous failed resuming\n");
		return -EBUSY;
	}

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid.  Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}

static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version', which was in use before
	 * hibernation, to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	if (wait_for_completion_timeout(
		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
		pr_err("Some vmbus device is missing after suspending?\n");

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */

static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);

/*
 * Note: we must use the "no_irq" ops, otherwise hibernation can not work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
 * resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */
static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume
};

static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
	.drv.pm = &vmbus_bus_pm,
};

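/*
 * kexec/crash handlers: unload the VMbus connection and tear down the
 * stimer and SynIC state before transferring to a new (crash) kernel.
 * The crash path only cleans up the current CPU, which is sufficient
 * for kdump.
 */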
static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
}

static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
}

static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 * 1) it's unnecessary as interrupts remain disabled between
	 * syscore_suspend() and syscore_resume(): see create_image() and
	 * resume_target_kernel()
	 * 2) the stimer on CPU0 is automatically disabled later by
	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
	 * 3) a warning would be triggered if we call
	 * clockevents_unbind_device(), which may sleep, in an
	 * interrupts-disabled context.
	 */
	hv_synic_disable_regs(0);

	return 0;
}

static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}

/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
	.suspend = hv_synic_suspend,
	.resume = hv_synic_resume,
};

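/*
 * Module entry point: register the VMBUS ACPI driver, wait for the ACPI
 * probe to discover the bus resources, and then bring the bus up via
 * vmbus_bus_init().
 */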
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}
	hv_debug_init();

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	register_syscore_ops(&hv_synic_syscore_ops);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}

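/*
 * Module exit: undo hv_acpi_init()/vmbus_bus_init() in reverse order --
 * disconnect from the host, free channels and per-CPU state, and unregister
 * the notifiers, bus, and ACPI driver.
 */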
static void __exit vmbus_exit(void)
{
	int cpu;

	unregister_syscore_ops(&hv_synic_syscore_ops);

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_stimer_global_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	hv_debug_rm_all_dir();

	vmbus_free_channels();
	kfree(vmbus_connection.channels);

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
	}

	/*
	 * The panic notifier is always registered, hence we should
	 * also unconditionally unregister it here as well.
	 */
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &hyperv_panic_block);

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);