// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher-density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * accordingly be 63 (or 62) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
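
/*
 * Worked example of the chunk geometry above -- illustrative numbers only,
 * assuming PAGE_SIZE == 4096 and a header that fits in a single chunk
 * (values differ for other page sizes or with CONFIG_DEBUG_SPINLOCK=y):
 *	CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and
 *	TOTAL_CHUNKS = 4096 / 64 = 64 chunks per page;
 *	ZHDR_SIZE_ALIGNED = 64, hence ZHDR_CHUNKS = 1 and NCHUNKS = 63
 *	chunks available for objects;
 *	size_to_chunks(100) == 2, i.e. a 100-byte object occupies two chunks.
 */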

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
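
/*
 * The slots structure's back link to its pool is at least
 * machine-word-aligned, so its two lowest bits are guaranteed to be zero
 * and can be reused as per-slots-object flags (see HANDLE_FLAG_MASK and
 * enum z3fold_handle_flags below); slots_to_pool() masks them off again
 * when following the back link.
 */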
#define HANDLE_FLAG_MASK	(0x03)

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of objects in this page whose handles are
 *			stored in another page's slots object
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain
 *		two or fewer buddies; the list each z3fold page is added to
 *		depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}
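
/*
 * A non-headless handle is the address of one of the slot[] entries of a
 * z3fold_buddy_slots object. Those objects are allocated from a kmem cache
 * with SLOTS_ALIGN (64-byte) alignment, and the slot[] array sits at the
 * very start of the object, so clearing the low six bits of a handle
 * recovers the base address of the containing slots structure.
 */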
static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

static inline struct z3fold_header *__get_z3fold_header(unsigned long handle,
							bool lock)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			if (lock)
				locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (lock);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, false);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long h)
{
	return __get_z3fold_header(h, true);
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->foreign_handles = 0;
	zhdr->mapped_count = 0;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}
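
/*
 * Layout of the value stored in a slot (the "encoded handle"): the high
 * bits are the page-aligned address of the owning z3fold header; the low
 * two bits (BUDDY_MASK) carry the buddy index as computed by __idx(); for
 * a LAST buddy the object size in chunks is additionally stored starting
 * at bit BUDDY_SHIFT, which is what handle_to_chunks() reads back.
 */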
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
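
/*
 * Illustration of the above with hypothetical numbers (assuming
 * TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1): with a middle object at
 * start_middle == 20 occupying 10 chunks and both other buddies free,
 * nfree_before == 20 - 1 == 19 and nfree_after == 64 - (20 + 10) == 34,
 * so num_free_chunks() returns 34, the larger contiguous region.
 */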

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
		int freechunks = num_free_chunks(zhdr);

		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
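
/* Returns true if at most one buddy in the page is currently in use */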
static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
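
/*
 * In-page compaction thus handles three layouts: a lone middle object is
 * turned into a first object (first_num is incremented so that previously
 * issued handles now decode to the object's new FIRST position), and a
 * middle object neighbouring a single first or last object is slid toward
 * it when that closes a gap of at least BIG_CHUNK_GAP chunks.
 * do_compact_page() below additionally tries compact_single_buddy(),
 * which moves the only remaining object to a different page entirely so
 * that this page can be released.
 */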

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}
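
/*
 * Lookup order above: the local CPU's unbuddied lists are scanned first,
 * starting from the smallest free region that can still hold the request
 * (a best-fit within this CPU's lists); only then are the other CPUs'
 * lists tried, there restricted to an exact-size match so that anything
 * less than a perfect fit does not create cross-CPU fragmentation.
 */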

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds what a single page
 * can hold, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}

	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
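
/*
 * Illustrative call sequence -- a sketch only, since these functions are
 * static and in practice are reached through the zpool wrappers at the end
 * of this file ('buf' and 'len' are caller-supplied in this example):
 *
 *	unsigned long handle;
 *	void *mem;
 *
 *	if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *		mem = z3fold_map(pool, handle);
 *		memcpy(mem, buf, len);
 *		z3fold_unmap(pool, handle);
 *		...
 *		z3fold_free(pool, handle);
 *	}
 */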

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		put_z3fold_header(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;

			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked, so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
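
/*
 * Sketch of how a client reaches this driver through the zpool layer --
 * an illustration only, since the exact zpool signatures depend on the
 * kernel version ("mypool" and my_zpool_ops are caller-defined; zswap is
 * the in-tree user of this path):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *	void *mem;
 *
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 *	mem = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *	...
 *	zpool_unmap_handle(zp, handle);
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */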

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");