vmw_balloon.c

// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");

/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};

#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)

/*
 * 64-bit targets are only supported in 64-bit
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif
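
/*
 * Note (illustrative arithmetic, not a new definition): on a 64-bit kernel
 * VMW_BALLOON_CAPABILITIES therefore advertises bits 1-5, i.e. 0x3e, while a
 * 32-bit build omits VMW_BALLOON_64_BIT_TARGET and advertises 0x1e.
 */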
enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};

enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)

/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * are supported.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
 *
 * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			    to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			      runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Informs the hypervisor about a batch of
 *				  ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Informs the hypervisor about a batch of
 *				    pages that are about to be deflated from
 *				    the balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *				     for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *				       pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
 *				       that would be invoked when the balloon
 *				       size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	/* No command 5 */
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)

enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

#define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))

static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};

enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)

enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)

static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);

struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};

/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
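
/*
 * Layout sketch (assuming the usual little-endian, low-to-high bitfield
 * allocation and PAGE_SHIFT == 12): @status occupies bits 0-4, @reserved
 * bits 5-11 and @pfn bits 12-63 of one 64-bit word. An entry with a zero
 * status is therefore simply the page's physical address, e.g.:
 *
 *	struct vmballoon_batch_entry e = { .pfn = 0x12345 };
 *	// as a u64: 0x12345000 == 0x12345 << PAGE_SHIFT
 */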
struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support sizes bigger than 32 bits, use
	 * 64 bits in preparation for future support.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use READ_ONCE()/WRITE_ONCE() for accesses,
	 * and suffer the less optimized code. Although we may read a stale
	 * target value if multiple accesses happen at once, the performance
	 * impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked on each
	 * operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with reads,
	 * so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/* statistics */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages: list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock .
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: handle of the VMCI doorbell.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock .
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker shrinker;

	/**
	 * @shrinker_registered: whether the shrinker was registered.
	 *
	 * The shrinker interface does not gracefully handle the removal of a
	 * shrinker that was never registered. This indication simplifies the
	 * unregistration process.
	 */
	bool shrinker_registered;
};

static struct vmballoon balloon;

struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};

static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}

static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}

static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}

static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}

static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}

static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}
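
/*
 * Register convention of the "inl" backdoor call above, as implied by the
 * asm constraints (a summary, not an additional interface): the magic value
 * goes in %eax, the command in %ecx, the port number in %edx, and the two
 * arguments in %ebx and %esi. On return, %eax holds the status, while %ebx
 * (or %ecx for VMW_BALLOON_CMD_START) holds the command result, e.g. the
 * balloon target for VMW_BALLOON_CMD_GET_TARGET.
 */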
static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}

/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 *
 * Return: zero on success or appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}

/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}
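
/*
 * Worked example (assuming 4KB base pages and 2MB huge pages, so
 * VMW_BALLOON_2M_ORDER == 21 - 12 == 9): a VMW_BALLOON_4K_PAGE is order 0
 * and covers 1 frame, while a VMW_BALLOON_2M_PAGE is order 9 and covers
 * 1 << 9 == 512 frames. All balloon size accounting below is kept in these
 * 4k frames.
 */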
/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}

/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}

/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, -EINVAL if the limit does not fit in 32 bits, as
 * required by the host-guest protocol, and -EIO if an error occurred in
 * communicating with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page was rejected during
		 * inflation by the hypervisor, and then split into 4KB pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}

/**
 * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 *
 * Return: zero on success, -EIO if the hypervisor refused the page.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}

/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation
 * for an individual page. Provides the page that the operation was performed
 * on in the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}

/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or can resume using them, since the VM does). The host may
 * reject some pages; we need to check the return value and maybe submit a
 * different page. The pages that are inflated/deflated are pointed to by
 * @b->page or @b->batch_page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}
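
/*
 * Note on the batched commands above: the hypervisor is handed a single PFN,
 * that of the communication page (@b->batch_page), plus the entry count in
 * @num_pages. It reads the PFNs to (un)lock from the batch entries and
 * writes a per-entry status back into the same page, which
 * vmballoon_status_page() then picks up.
 */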
/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}

/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects a page, it is put on the
 * @ctl refuse list. These refused pages are then released when moving to the
 * next size of pages.
 *
 * Note that we neither free any @page here nor put them back on the ballooned
 * pages list. Instead we queue them for later processing. We do that for
 * several reasons. First, we do not want to free the pages under the lock.
 * Second, it allows us to unify the handling of lock and unlock. In the
 * inflate case, the caller will check if there are too many refused pages and
 * release them. Although it is not identical to the past behavior, it should
 * not affect performance.
 *
 * Return: zero on success, -EIO if the whole batch failed.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages we are saving the original value in @num_pages and
	 * use this value to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides the result of a single
		 * operation.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * Error happened. Move the page to the refused list and update
		 * the page counts.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}

/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
					int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	if (n_pages)
		*n_pages = 0;
}

/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}

/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates a deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must cast first because of int sizes; otherwise we might get
	 * huge positives instead of negatives.
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}
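
/*
 * Worked example of the slack logic above (numbers are illustrative only):
 * with size == 10240 frames and target == 10000, the difference of 240
 * frames is below the 512-frame (2MB) slack, so vmballoon_change() returns 0
 * and no deflation happens; with target == 9000 it returns -1240 and the
 * worker deflates. A target of 0 always empties the balloon regardless of
 * the slack.
 */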
/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages in the ballooned page list, clears the
 * list and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;
	struct page *page;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages in a local list which is not available
		 * for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

		list_for_each_entry(page, pages, lru) {
			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
		}

		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}

/**
 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
 *
 * @b: pointer to balloon.
 * @pages: list to which the dequeued pages are added.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 * @n_req_pages: the number of requested pages.
 *
 * Dequeues the number of requested pages from the balloon for deflation. The
 * number of dequeued pages may be lower, if not enough pages in the requested
 * size are available.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct page *page, *tmp;
	unsigned int i = 0;
	unsigned long flags;

	/* In the case of 4k pages, use the compaction infrastructure */
	if (page_size == VMW_BALLOON_4K_PAGE) {
		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
						     n_req_pages);
		return;
	}

	/* 2MB pages */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);

		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}

	__count_vm_events(BALLOON_DEFLATE,
			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	*n_pages = i;
}

/**
 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
 *
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
 * then being refused. To prevent this case, this function splits the refused
 * pages into 4KB pages and adds them into the @prealloc_pages list.
 */
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
	struct page *page, *tmp;
	unsigned int i, order;

	order = vmballoon_page_order(ctl->page_size);

	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
		list_del(&page->lru);
		split_page(page, order);
		for (i = 0; i < (1 << order); i++)
			list_add(&page[i].lru, &ctl->prealloc_pages);
	}

	ctl->n_refused_pages = 0;
}
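
/*
 * For instance (assuming an order-9 2MB page), a single refused huge page is
 * turned by split_page() into 512 independently refcounted order-0 pages,
 * which land on @prealloc_pages and are consumed first by
 * vmballoon_alloc_page_list() on the following 4k inflation rounds.
 */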
/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4k. This will also empty
			 * the refused pages list.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}

/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 * calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If a specific number of frames was requested, try to
		 * deflate that many frames. Otherwise, deflation is performed
		 * according to the target and balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of frames based on current page size,
		 * but limit the deflated frames to a single chunk
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before pages are moved to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if the
		 * communication is coordinated (i.e., not pop). We ignore the
		 * return code; instead we check whether we managed to unlock
		 * all the pages. If we failed, we will move to the next page
		 * size and eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}

/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching, by deallocating the page for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}

/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching, by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}
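
/*
 * With 4KB pages and the 8-byte struct vmballoon_batch_entry, the assignment
 * above yields 4096 / 8 == 512 entries per batch, which matches the "up to
 * 512" limit documented for the batched lock/unlock commands.
 */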
/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}

/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}

/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when wakeup command not supported. Error-code
 * otherwise.
 *
 * Initialize vmci doorbell, to get notified as soon as balloon changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}

/**
 * vmballoon_pop - Quickly release all pages allocated for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This function is called when the host decides to "reset" the balloon for
 * one reason or another. Unlike normal "deflate" we do not (shall not) notify
 * the host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	unsigned long size;

	while ((size = atomic64_read(&b->size)))
		vmballoon_deflate(b, size, false);
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}

/**
 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
 *
 * @work: pointer to the &work_struct which is provided by the workqueue.
 *
 * Resets the protocol if needed, gets the new size and adjusts balloon as
 * needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	/*
	 * Update the stats while holding the semaphore to ensure that
	 * @balloon_stat_enabled is consistent with whether the stats are
	 * actually enabled.
	 */
	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change != 0) {
		pr_debug("%s - size: %llu, target %lu\n", __func__,
			 atomic64_read(&b->size), READ_ONCE(b->target));

		if (change > 0)
			vmballoon_inflate(b);
		else  /* (change < 0) */
			vmballoon_deflate(b, 0, true);
	}

	up_read(&b->conf_sem);

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}
/**
 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of pages that were freed during deflation.
 */
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;
	unsigned long deflated_frames;

	pr_debug("%s - size: %llu\n", __func__, atomic64_read(&b->size));

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);

	/*
	 * If the lock is also contended for read, we cannot easily reclaim
	 * and we bail out.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return 0;

	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);

	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
				deflated_frames);

	/*
	 * Delay future inflation for some time to mitigate the situations in
	 * which the balloon continuously grows and shrinks. Use WRITE_ONCE()
	 * since the access is asynchronous.
	 */
	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);

	up_read(&b->conf_sem);

	return deflated_frames;
}
/**
 * vmballoon_shrinker_count() - return the number of ballooned pages.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of 4k pages that are allocated for the balloon and can
 * therefore be reclaimed under pressure.
 */
static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
					      struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;

	return atomic64_read(&b->size);
}
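
/**
 * vmballoon_unregister_shrinker() - unregister the balloon shrinker.
 *
 * @b: pointer to the balloon.
 *
 * Safe to call even if the shrinker was never registered; the
 * @shrinker_registered flag guards the actual unregistration.
 */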
static void vmballoon_unregister_shrinker(struct vmballoon *b)
{
	if (b->shrinker_registered)
		unregister_shrinker(&b->shrinker);
	b->shrinker_registered = false;
}
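
/**
 * vmballoon_register_shrinker() - register the balloon shrinker.
 *
 * @b: pointer to the balloon.
 *
 * Registration is skipped entirely when @vmwballoon_shrinker_enable is not
 * set, in which case zero is returned as well.
 *
 * Return: zero on success or when the shrinker is disabled, error code
 * otherwise.
 */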
static int vmballoon_register_shrinker(struct vmballoon *b)
{
	int r;

	/* Do nothing if the shrinker is not enabled */
	if (!vmwballoon_shrinker_enable)
		return 0;

	b->shrinker.scan_objects = vmballoon_shrinker_scan;
	b->shrinker.count_objects = vmballoon_shrinker_count;
	b->shrinker.seeks = DEFAULT_SEEKS;

	r = register_shrinker(&b->shrinker);

	if (r == 0)
		b->shrinker_registered = true;

	return r;
}
/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

static const char * const vmballoon_stat_page_names[] = {
	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
};

static const char * const vmballoon_stat_names[] = {
	[VMW_BALLOON_STAT_TIMER]		= "timer",
	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
	[VMW_BALLOON_STAT_RESET]		= "reset",
	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
};
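
/**
 * vmballoon_enable_stats() - allocate and enable statistics collection.
 *
 * @b: pointer to the balloon.
 *
 * Takes @conf_sem for write to serialize against concurrent enablers and
 * against configuration changes. The @balloon_stat_enabled static key is
 * only enabled after the statistics buffer has been allocated.
 *
 * Return: zero on success, -ENOMEM if the allocation failed.
 */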
static int vmballoon_enable_stats(struct vmballoon *b)
{
	int r = 0;

	down_write(&b->conf_sem);

	/* did we somehow race with another reader which enabled stats? */
	if (b->stats)
		goto out;

	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);

	if (!b->stats) {
		/* allocation failed */
		r = -ENOMEM;
		goto out;
	}
	static_key_enable(&balloon_stat_enabled.key);
out:
	up_write(&b->conf_sem);
	return r;
}
/**
 * vmballoon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed via the vmmemctl file in
 * debugfs. To avoid the overhead - mainly that of memory - of collecting
 * the statistics, we only start collecting them after the first time the
 * counters are read.
 *
 * Return: zero on success or an error code.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	int i, j;

	/* enables stats if they are disabled */
	if (!b->stats) {
		int r = vmballoon_enable_stats(b);

		if (r)
			return r;
	}

	/* format capabilities info */
	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
		   VMW_BALLOON_CAPABILITIES);
	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
	seq_printf(f, "%-22s: %16s\n", "is resetting",
		   b->reset_required ? "y" : "n");

	/* format size info */
	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));

	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
			   vmballoon_cmd_names[i],
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
	}

	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
		seq_printf(f, "%-22s: %16llu\n",
			   vmballoon_stat_names[i],
			   atomic64_read(&b->stats->general_stat[i]));

	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
			seq_printf(f, "%-18s(%s): %16llu\n",
				   vmballoon_stat_page_names[i],
				   vmballoon_page_size_names[j],
				   atomic64_read(&b->stats->page_stat[i][j]));
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
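
/*
 * Create the "vmmemctl" entry at the debugfs root, through which the
 * statistics above are exposed. Statistics collection itself stays disabled
 * until the file is first read (see vmballoon_debug_show()).
 */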
static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
}
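
/*
 * Tear down the debugfs entry and release the statistics buffer. The static
 * key is disabled first so that the fast paths stop recording statistics
 * before the buffer is freed.
 */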
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_remove(b->dbg_entry);
	kfree(b->stats);
	b->stats = NULL;
}

#else

static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */
#ifdef CONFIG_BALLOON_COMPACTION
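
/*
 * A pseudo-filesystem is needed so that balloon pages can be associated
 * with an anonymous inode; the balloon compaction core uses the inode's
 * address space to track and migrate the pages.
 */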
static int vmballoon_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, BALLOON_VMW_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type vmballoon_fs = {
	.name			= "balloon-vmware",
	.init_fs_context	= vmballoon_init_fs_context,
	.kill_sb		= kill_anon_super,
};

static struct vfsmount *vmballoon_mnt;
/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is really open-coded, but that is according to the interface
 * that balloon_compaction provides.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 * momentarily, and -EBUSY if migration failed and should be retried
 * with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is an ongoing configuration change
	 * (i.e., balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock. We keep holding @comm_lock since we will need it in a
	 * second.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error.
		 * Instead we will decrease the size of the balloon to reflect
		 * the change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it
		 * to the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for @newpage, so
	 * just insert it into the list and update the statistics under the
	 * lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success,
	 * we need to reduce the number of isolated_pages.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}
/**
 * vmballoon_compaction_deinit() - removes compaction related data.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_compaction_deinit(struct vmballoon *b)
{
	if (!IS_ERR(b->b_dev_info.inode))
		iput(b->b_dev_info.inode);

	b->b_dev_info.inode = NULL;
	kern_unmount(vmballoon_mnt);
	vmballoon_mnt = NULL;
}
/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * If during the initialization a failure occurred, this function does not
 * perform cleanup. The caller must call vmballoon_compaction_deinit() in
 * this case.
 *
 * Return: zero on success or error code on failure.
 */
static __init int vmballoon_compaction_init(struct vmballoon *b)
{
	vmballoon_mnt = kern_mount(&vmballoon_fs);
	if (IS_ERR(vmballoon_mnt))
		return PTR_ERR(vmballoon_mnt);

	b->b_dev_info.migratepage = vmballoon_migratepage;
	b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);
	if (IS_ERR(b->b_dev_info.inode))
		return PTR_ERR(b->b_dev_info.inode);

	b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
	return 0;
}
#else /* CONFIG_BALLOON_COMPACTION */

static void vmballoon_compaction_deinit(struct vmballoon *b)
{
}

static int vmballoon_compaction_init(struct vmballoon *b)
{
	return 0;
}

#endif /* CONFIG_BALLOON_COMPACTION */
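
/**
 * vmballoon_init() - loads the balloon driver.
 *
 * Registers the shrinker, initializes compaction support and queues the
 * first iteration of vmballoon_work(). Setting @reset_required causes that
 * first iteration to establish the protocol with the hypervisor.
 *
 * Return: zero on success, -ENODEV when not running on VMware's hypervisor,
 * or another error code on initialization failure.
 */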
static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		goto fail;

	/*
	 * Initialization of compaction must be done after the call to
	 * balloon_devinfo_init().
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	error = vmballoon_compaction_init(&balloon);
	if (error)
		goto fail;

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
fail:
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_compaction_deinit(&balloon);
	return error;
}
/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);
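
/**
 * vmballoon_exit() - unloads the balloon driver.
 *
 * Stops the worker, tears down the VMCI doorbell and the debugfs entry,
 * resets the connection with the monitor and releases all ballooned pages.
 */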
static void __exit vmballoon_exit(void)
{
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);

	/* Only once the balloon is popped can compaction be deinitialized */
	vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);