shmem.c

/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

#include "internal.h"

#undef CREATE_TRACE_POINTS
#include <trace/hooks/shmem_fs.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
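/*
 * Illustrative arithmetic (added for clarity, not in the original
 * source): with 4K pages, BLOCKS_PER_PAGE is 4096/512 = 8 of the
 * 512-byte units counted by i_blocks, and VM_ACCT(5000) is
 * PAGE_ALIGN(5000) >> PAGE_SHIFT = 8192 >> 12 = 2 pages charged
 * against the overcommit accounting.
 */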
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
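/*
 * Illustrative caller pattern (added for clarity, not in the original
 * source), mirroring what shmem_read_mapping_page_gfp() does near the
 * bottom of this file: the page comes back locked with a reference
 * held, so the caller unlocks it and eventually drops the reference.
 *
 *	struct page *page = NULL;
 *	int error = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (error)
 *		return ERR_PTR(error);
 *	unlock_page(page);
 *	...
 *	put_page(page);
 */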
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
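/*
 * Illustrative example (added for clarity, not in the original source):
 * growing a pre-accounted object from 3 pages to 5 via
 * shmem_reacct_size() charges only the 2-page VM_ACCT delta; shrinking
 * it back returns those 2 pages to the overcommit pool.
 */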
/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}
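/*
 * Note (added for clarity): shmem_inode_acct_block() and
 * shmem_inode_unacct_blocks() must stay balanced for the same page
 * count; shmem_charge()/shmem_uncharge() and shmem_recalc_inode()
 * below are the main users of that pairing.
 */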
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
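/*
 * Worked example (added for clarity): with SHMEM_INO_BATCH == 1024, a
 * CPU whose per-cpu cursor hits a multiple of 1024 refills by claiming
 * [next_ino, next_ino + 1023] from the superblock under stat_lock, and
 * then hands out the remaining inos of that batch with no locking at all.
 */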
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}
/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */
#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
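/*
 * Usage illustration (added for clarity, not in the original source):
 * the per-mount values map to e.g.
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 * while the special values are set globally, e.g.
 *	echo deny > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 */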
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		sbinfo->shrinklist_len--;
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto move_back;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Move the inode on the list back to shrinklist if we failed
		 * to lock the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto move_back;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed move the inode on the list back to shrinklist */
		if (ret)
			goto move_back;

		split++;
drop:
		list_del_init(&info->shrinklist);
		goto put;
move_back:
		/*
		 * Make sure the inode is either on the global list or deleted
		 * from any local list before iput() since it could be deleted
		 * in another thread once we put the inode (then the local list
		 * is corrupted).
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		list_move(&info->shrinklist, &sbinfo->shrinklist);
		sbinfo->shrinklist_len++;
		spin_unlock(&sbinfo->shrinklist_lock);
put:
		iput(inode);
	}

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = compound_nr(page);
	int error;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (error) {
			if (PageTransHuge(page)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	cgroup_throttle_swaprate(page, gfp);

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__inc_node_page_state(page, NR_SHMEM_THPS);
		}
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	page->mapping = NULL;
	page_ref_sub(page, nr);
	return error;
}
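/*
 * Note (added for clarity): the do { } while (xas_nomem(...)) loop
 * above is the standard XArray retry idiom: when the store fails for
 * lack of memory, xas_nomem() allocates a node outside the lock and
 * returns true, so the whole locked section is retried.
 */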
/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}
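/*
 * Note (added for clarity): this helper is what fs/proc/task_mmu.c uses
 * to fill in the "Swap:" figure of /proc/<pid>/smaps for shmem mappings,
 * which is why racy results are tolerable here.
 */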
/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
 * head, and then succeeded to trylock on tail.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}
/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
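/*
 * Worked example of the index arithmetic below (added for clarity):
 * with 4K pages, punching lstart=1000, lend=9999 gives start=1, end=2,
 * partial_start=1000 and partial_end=1808; so page 1 is removed whole,
 * page 0 is zeroed from byte 1000 up, and page 2 is zeroed below 1808.
 */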
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if ((!unfalloc || !PageUptodate(page)) &&
			    page_mapping(page) == mapping) {
				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (!unfalloc || !PageUptodate(page)) {
				if (page_mapping(page) != mapping) {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
				VM_BUG_ON_PAGE(PageWriteback(page), page);
				if (shmem_punch_compound(page, start, end))
					truncate_inode_page(mapping, page);
				else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
					/* Wipe the page and don't get stuck */
					clear_highpage(page);
					flush_dcache_page(page);
					set_page_dirty(page);
					if (index <
					    round_up(start, HPAGE_PMD_NR))
						start = index + 1;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = path->dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);

	if (is_huge_enabled(sb_info))
		stat->blksize = HPAGE_PMD_SIZE;

	return 0;
}
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = current_time(inode);
		}
		if (newsize <= oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);
			if (info->alloced)
				shmem_truncate_range(inode,
							newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			if (oldsize > holebegin)
				unmap_mapping_range(inode->i_mapping,
							holebegin, 0, 1);

			/*
			 * Part of the huge page can be beyond i_size: subject
			 * to shrink under memory pressure.
			 */
			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
				spin_lock(&sbinfo->shrinklist_lock);
				/*
				 * _careful to defend against unlocked access to
				 * ->shrink_list in shmem_unused_huge_shrink()
				 */
				if (list_empty_careful(&info->shrinklist)) {
					list_add_tail(&info->shrinklist,
							&sbinfo->shrinklist);
					sbinfo->shrinklist_len++;
				}
				spin_unlock(&sbinfo->shrinklist_lock);
			}
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->shrinklist)) {
			spin_lock(&sbinfo->shrinklist_lock);
			if (!list_empty(&info->shrinklist)) {
				list_del_init(&info->shrinklist);
				sbinfo->shrinklist_len--;
			}
			spin_unlock(&sbinfo->shrinklist_lock);
		}
		while (!list_empty(&info->swaplist)) {
			/* Wait while shmem_unuse() is scanning this inode... */
			wait_var_event(&info->stop_eviction,
				       !atomic_read(&info->stop_eviction));
			mutex_lock(&shmem_swaplist_mutex);
			/* ...but beware of the race if we peeked too early */
			if (!atomic_read(&info->stop_eviction))
				list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
extern struct swap_info_struct *swap_info[];

static int shmem_find_swap_entries(struct address_space *mapping,
				   pgoff_t start, unsigned int nr_entries,
				   struct page **entries, pgoff_t *indices,
				   unsigned int type, bool frontswap)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	swp_entry_t entry;
	unsigned int ret = 0;

	if (!nr_entries)
		return 0;

	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xas_retry(&xas, page))
			continue;

		if (!xa_is_value(page))
			continue;

		entry = radix_to_swp_entry(page);
		if (swp_type(entry) != type)
			continue;
		if (frontswap &&
		    !frontswap_test(swap_info[type], swp_offset(entry)))
			continue;

		indices[ret] = xas.xa_index;
		entries[ret] = page;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
		if (++ret == nr_entries)
			break;
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Move the swapped pages for an inode to page cache. Returns the count
 * of pages swapped in, or the error in case of failure.
 */
static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec,
				    pgoff_t *indices)
{
	int i = 0;
	int ret = 0;
	int error = 0;
	struct address_space *mapping = inode->i_mapping;

	for (i = 0; i < pvec.nr; i++) {
		struct page *page = pvec.pages[i];

		if (!xa_is_value(page))
			continue;
		error = shmem_swapin_page(inode, indices[i],
					  &page, SGP_CACHE,
					  mapping_gfp_mask(mapping),
					  NULL, NULL);
		if (error == 0) {
			unlock_page(page);
			put_page(page);
			ret++;
		}
		if (error == -ENOMEM)
			break;
		error = 0;
	}
	return error ? error : ret;
}
/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct inode *inode, unsigned int type,
			     bool frontswap, unsigned long *fs_pages_to_unuse)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t start = 0;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0);
	int ret = 0;

	pagevec_init(&pvec);
	do {
		unsigned int nr_entries = PAGEVEC_SIZE;

		if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE)
			nr_entries = *fs_pages_to_unuse;

		pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries,
						  pvec.pages, indices,
						  type, frontswap);
		if (pvec.nr == 0) {
			ret = 0;
			break;
		}

		ret = shmem_unuse_swap_entries(inode, pvec, indices);
		if (ret < 0)
			break;

		if (frontswap_partial) {
			*fs_pages_to_unuse -= ret;
			if (*fs_pages_to_unuse == 0) {
				ret = FRONTSWAP_PAGES_UNUSED;
				break;
			}
		}

		start = indices[pvec.nr - 1];
	} while (true);

	return ret;
}
/*
 * Read all the shared memory data that resides in the swap
 * device 'type' back into memory, so the swap device can be
 * unused.
 */
int shmem_unuse(unsigned int type, bool frontswap,
		unsigned long *fs_pages_to_unuse)
{
	struct shmem_inode_info *info, *next;
	int error = 0;

	if (list_empty(&shmem_swaplist))
		return 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
		if (!info->swapped) {
			list_del_init(&info->swaplist);
			continue;
		}
		/*
		 * Drop the swaplist mutex while searching the inode for swap;
		 * but before doing so, make sure shmem_evict_inode() will not
		 * remove placeholder inode from swaplist, nor let it be freed
		 * (igrab() would protect from unlink, but not from unmount).
		 */
		atomic_inc(&info->stop_eviction);
		mutex_unlock(&shmem_swaplist_mutex);

		error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
					  fs_pages_to_unuse);
		cond_resched();

		mutex_lock(&shmem_swaplist_mutex);
		next = list_next_entry(info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		if (atomic_dec_and_test(&info->stop_eviction))
			wake_up_var(&info->stop_eviction);
		if (error)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	return error;
}
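
/*
 * The stop_eviction count above pairs with shmem_evict_inode(): while it
 * is raised, an eviction of this inode sleeps in wait_var_event() instead
 * of freeing the struct out from under our scan, and the wake_up_var()
 * here releases that waiter once the unuse of this inode is complete.
 */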
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;

			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page(page);
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
			NULL) == 0) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		info->swapped++;
		spin_unlock_irq(&info->lock);

		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	put_swap_page(page, swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
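
/*
 * On the redirty path above, AOP_WRITEPAGE_ACTIVATE hands the still-locked
 * page back to vmscan's pageout(), which translates it into PAGE_ACTIVATE:
 * the page is moved back to the active list rather than retried, so reclaim
 * does not keep hammering on pages tmpfs has declined to swap out.
 */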
#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif /* CONFIG_NUMA && CONFIG_TMPFS */

#ifndef CONFIG_NUMA
#define vm_policy vm_private_data
#endif
static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
		struct shmem_inode_info *info, pgoff_t index)
{
	/* Create a pseudo vma that just contains the policy */
	vma_init(vma, NULL);
	/* Bias interleave by inode number to distribute better across nodes */
	vma->vm_pgoff = index + info->vfs_inode.i_ino;
	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
}

static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
{
	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(vma->vm_policy);
}

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;
	struct vm_fault vmf = {
		.vma = &pvma,
	};

	shmem_pseudo_vma_init(&pvma, info, index);
	page = swap_cluster_readahead(swap, gfp, &vmf);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}
static struct page *shmem_alloc_hugepage(gfp_t gfp,
		struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t hindex;
	struct page *page;

	hindex = round_down(index, HPAGE_PMD_NR);
	if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1,
								XA_PRESENT))
		return NULL;

	shmem_pseudo_vma_init(&pvma, info, hindex);
	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
			       HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
	shmem_pseudo_vma_destroy(&pvma);
	if (page)
		prep_transhuge_page(page);
	else
		count_vm_event(THP_FILE_FALLBACK);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page = NULL;

	trace_android_vh_shmem_alloc_page(&page);
	if (page)
		return page;

	shmem_pseudo_vma_init(&pvma, info, index);
	page = alloc_page_vma(gfp, &pvma, 0);
	shmem_pseudo_vma_destroy(&pvma);

	return page;
}
static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
		struct inode *inode,
		pgoff_t index, bool huge)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct page *page;
	int nr;
	int err = -ENOSPC;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		huge = false;
	nr = huge ? HPAGE_PMD_NR : 1;

	if (!shmem_inode_acct_block(inode, nr))
		goto failed;

	if (huge)
		page = shmem_alloc_hugepage(gfp, info, index);
	else
		page = shmem_alloc_page(gfp, info, index);
	if (page) {
		__SetPageLocked(page);
		__SetPageSwapBacked(page);
		return page;
	}

	err = -ENOMEM;
	shmem_inode_unacct_blocks(inode, nr);
failed:
	return ERR_PTR(err);
}
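
/*
 * Note the two failure modes above: -ENOSPC means shmem_inode_acct_block()
 * refused the blocks (the tmpfs size limit would be exceeded), while
 * -ENOMEM means the page allocator itself failed.  shmem_getpage_gfp()
 * relies on this distinction when deciding whether shrinking huge pages
 * beyond i_size might free up enough accounted blocks to retry.
 */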
/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	swp_entry_t entry;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	entry.val = page_private(oldpage);
	swap_index = swp_offset(entry);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	get_page(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__SetPageLocked(newpage);
	__SetPageSwapBacked(newpage);
	SetPageUptodate(newpage);
	set_page_private(newpage, entry.val);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	xa_lock_irq(&swap_mapping->i_pages);
	error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
	if (!error) {
		mem_cgroup_migrate(oldpage, newpage);
		__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
		__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
	}
	xa_unlock_irq(&swap_mapping->i_pages);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		lru_cache_add(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	put_page(oldpage);
	put_page(oldpage);
	return error;
}
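
/*
 * The two put_page(oldpage) calls above drop the two references oldpage
 * held on entry: the swap cache's reference (which the get_page() further
 * up transferred to newpage along with the cache slot) and the caller's,
 * now that *pagep points at newpage instead.  In the unlikely error path,
 * oldpage has been redirected at newpage, so the same two puts unwind the
 * new page rather than the old.
 */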
/*
 * Swap in the page pointed to by *pagep.
 * Caller has to make sure that *pagep contains a valid swapped page.
 * Returns 0 and the page in pagep if success. On failure, returns the
 * error code and NULL in *pagep.
 */
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm;
	struct page *page;
	swp_entry_t swap;
	int error;

	VM_BUG_ON(!*pagep || !xa_is_value(*pagep));
	swap = radix_to_swp_entry(*pagep);
	*pagep = NULL;

	/* Look it up and read it in.. */
	page = lookup_swap_cache(swap, NULL, 0);
	if (!page) {
		/* Or update major stats only when swapin succeeds?? */
		if (fault_type) {
			*fault_type |= VM_FAULT_MAJOR;
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(charge_mm, PGMAJFAULT);
		}
		/* Here we actually start the io */
		page = shmem_swapin(swap, gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto failed;
		}
	}

	/* We have to do this with page locked to prevent races */
	lock_page(page);
	if (!PageSwapCache(page) || page_private(page) != swap.val ||
	    !shmem_confirm_swap(mapping, index, swap)) {
		error = -EEXIST;
		goto unlock;
	}
	if (!PageUptodate(page)) {
		error = -EIO;
		goto failed;
	}
	wait_on_page_writeback(page);

	/*
	 * Some architectures may have to restore extra metadata to the
	 * physical page after reading from swap.
	 */
	arch_swap_restore(swap, page);

	if (shmem_should_replace_page(page, gfp)) {
		error = shmem_replace_page(&page, gfp, info, index);
		if (error)
			goto failed;
	}

	error = shmem_add_to_page_cache(page, mapping, index,
					swp_to_radix_entry(swap), gfp,
					charge_mm);
	if (error)
		goto failed;

	spin_lock_irq(&info->lock);
	info->swapped--;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);

	if (sgp == SGP_WRITE)
		mark_page_accessed(page);

	delete_from_swap_cache(page);
	set_page_dirty(page);
	swap_free(swap);

	*pagep = page;
	return 0;
failed:
	if (!shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}

	return error;
}
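
/*
 * -EEXIST above is not really an error: it means the swap entry was raced
 * away (or already brought back in) under us.  shmem_getpage_gfp() below
 * responds to it by going back to its page cache lookup and starting over.
 */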
/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache.
 *
 * vma, vmf, and fault_type are only supplied by shmem_fault:
 * otherwise they are NULL.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp, gfp_t gfp,
			     struct vm_area_struct *vma, struct vm_fault *vmf,
			     vm_fault_t *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct mm_struct *charge_mm;
	struct page *page;
	enum sgp_type sgp_huge = sgp;
	pgoff_t hindex = index;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
		return -EFBIG;
	if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
		sgp = SGP_CACHE;
repeat:
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		return -EINVAL;
	}

	sbinfo = SHMEM_SB(inode->i_sb);
	charge_mm = vma ? vma->vm_mm : current->mm;

	page = find_lock_entry(mapping, index);

	if (page && vma && userfaultfd_minor(vma)) {
		if (!xa_is_value(page)) {
			unlock_page(page);
			put_page(page);
		}
		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
		return 0;
	}

	if (xa_is_value(page)) {
		error = shmem_swapin_page(inode, index, &page,
					  sgp, gfp, vma, fault_type);
		if (error == -EEXIST)
			goto repeat;

		*pagep = page;
		return error;
	}

	if (page)
		hindex = page->index;
	if (page && sgp == SGP_WRITE)
		mark_page_accessed(page);

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		put_page(page);
		page = NULL;
		hindex = index;
	}
	if (page || sgp == SGP_READ)
		goto out;

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */

	if (vma && userfaultfd_missing(vma)) {
		*fault_type = handle_userfault(vmf, VM_UFFD_MISSING);
		return 0;
	}

	/* shmem_symlink() */
	if (mapping->a_ops != &shmem_aops)
		goto alloc_nohuge;
	if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
		goto alloc_nohuge;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		goto alloc_huge;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		goto alloc_nohuge;
	case SHMEM_HUGE_WITHIN_SIZE: {
		loff_t i_size;
		pgoff_t off;

		off = round_up(index, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
		    i_size >> PAGE_SHIFT >= off)
			goto alloc_huge;

		fallthrough;
	}
	case SHMEM_HUGE_ADVISE:
		if (sgp_huge == SGP_HUGE)
			goto alloc_huge;
		/* TODO: implement fadvise() hints */
		goto alloc_nohuge;
	}

alloc_huge:
	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
	if (IS_ERR(page)) {
alloc_nohuge:
		page = shmem_alloc_and_acct_page(gfp, inode,
						 index, false);
	}
	if (IS_ERR(page)) {
		int retry = 5;

		error = PTR_ERR(page);
		page = NULL;
		if (error != -ENOSPC)
			goto unlock;
		/*
		 * Try to reclaim some space by splitting a huge page
		 * beyond i_size on the filesystem.
		 */
		while (retry--) {
			int ret;

			ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
			if (ret == SHRINK_STOP)
				break;
			if (ret)
				goto alloc_nohuge;
		}
		goto unlock;
	}

	if (PageTransHuge(page))
		hindex = round_down(index, HPAGE_PMD_NR);
	else
		hindex = index;

	if (sgp == SGP_WRITE)
		__SetPageReferenced(page);

	error = shmem_add_to_page_cache(page, mapping, hindex,
					NULL, gfp & GFP_RECLAIM_MASK,
					charge_mm);
	if (error)
		goto unacct;
	lru_cache_add(page);

	spin_lock_irq(&info->lock);
	info->alloced += compound_nr(page);
	inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
	alloced = true;

	if (PageTransHuge(page) &&
	    DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
			hindex + HPAGE_PMD_NR - 1) {
		/*
		 * Part of the huge page is beyond i_size: subject
		 * to shrink under memory pressure.
		 */
		spin_lock(&sbinfo->shrinklist_lock);
		/*
		 * _careful to defend against unlocked access to
		 * ->shrink_list in shmem_unused_huge_shrink()
		 */
		if (list_empty_careful(&info->shrinklist)) {
			list_add_tail(&info->shrinklist,
				      &sbinfo->shrinklist);
			sbinfo->shrinklist_len++;
		}
		spin_unlock(&sbinfo->shrinklist_lock);
	}

	/*
	 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
	 */
	if (sgp == SGP_FALLOC)
		sgp = SGP_WRITE;
clear:
	/*
	 * Let SGP_WRITE caller clear ends if write does not fill page;
	 * but SGP_FALLOC on a page fallocated earlier must initialize
	 * it now, lest undo on failure cancel our earlier guarantee.
	 */
	if (sgp != SGP_WRITE && !PageUptodate(page)) {
		int i;

		for (i = 0; i < compound_nr(page); i++) {
			clear_highpage(page + i);
			flush_dcache_page(page + i);
		}
		SetPageUptodate(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp <= SGP_CACHE &&
	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
		if (alloced) {
			ClearPageDirty(page);
			delete_from_page_cache(page);
			spin_lock_irq(&info->lock);
			shmem_recalc_inode(inode);
			spin_unlock_irq(&info->lock);
		}
		error = -EINVAL;
		goto unlock;
	}
out:
	*pagep = page + index - hindex;
	return 0;

	/*
	 * Error recovery.
	 */
unacct:
	shmem_inode_unacct_blocks(inode, compound_nr(page));

	if (PageTransHuge(page)) {
		unlock_page(page);
		put_page(page);
		goto alloc_nohuge;
	}
unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (error == -ENOSPC && !once++) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}
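
/*
 * On success, shmem_getpage_gfp() hands back *pagep locked and with a
 * reference held (pointing at the right subpage if a huge page was found
 * or allocated); it is up to the caller to unlock_page() and put_page()
 * when done, as shmem_write_end() and the swapin helpers above do.
 */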
/*
 * This is like autoremove_wake_function, but it removes the wait queue
 * entry unconditionally - even if something else had already woken the
 * target.
 */
static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);
	list_del_init(&wait->entry);
	return ret;
}
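
/*
 * The unconditional removal matters because the wait queue head lives on
 * the stack of the task doing the hole-punch (see shmem_fallocate()):
 * every waiter must be off the list by the time wake_up_all() returns,
 * which the WARN_ON_ONCE() there checks before that stack frame goes away.
 */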
static vm_fault_t shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = file_inode(vma->vm_file);
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
	enum sgp_type sgp;
	int err;
	vm_fault_t ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			struct file *fpin;
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);

			ret = VM_FAULT_NOPAGE;
			fpin = maybe_unlock_mmap_for_io(vmf, NULL);
			if (fpin)
				ret = VM_FAULT_RETRY;

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);

			if (fpin)
				fput(fpin);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	sgp = SGP_CACHE;

	if ((vma->vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		sgp = SGP_NOHUGE;
	else if (vma->vm_flags & VM_HUGEPAGE)
		sgp = SGP_HUGE;

	err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
				gfp, vma, vmf, &ret);
	if (err)
		return vmf_error(err);
	return ret;
}
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long uaddr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *,
		unsigned long, unsigned long, unsigned long, unsigned long);
	unsigned long addr;
	unsigned long offset;
	unsigned long inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	if (len > TASK_SIZE)
		return -ENOMEM;

	get_area = current->mm->get_unmapped_area;
	addr = get_area(file, uaddr, len, pgoff, flags);

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return addr;
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (shmem_huge == SHMEM_HUGE_DENY)
		return addr;
	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint and we allocated area there
	 * successfully, respect that as before.
	 */
	if (uaddr == addr)
		return addr;

	if (shmem_huge != SHMEM_HUGE_FORCE) {
		struct super_block *sb;

		if (file) {
			VM_BUG_ON(file->f_op != &shmem_file_operations);
			sb = file_inode(file)->i_sb;
		} else {
			/*
			 * Called directly from mm/mmap.c, or drivers/char/mem.c
			 * for "/dev/zero", to create a shared anonymous object.
			 */
			if (IS_ERR(shm_mnt))
				return addr;
			sb = shm_mnt->mnt_sb;
		}
		if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
			return addr;
	}

	offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
	if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
		return addr;
	if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
		return addr;

	inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += HPAGE_PMD_SIZE;

	if (inflated_addr > TASK_SIZE - len)
		return addr;
	return inflated_addr;
}
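
/*
 * A worked example of the arithmetic above, assuming 4kB pages and 2MB
 * huge pages: for pgoff 0x300 the file offset is 0x300000, so
 * offset = 0x300000 & 0x1fffff = 0x100000.  An area of len + 2MB - 4kB
 * is requested, and the result slid so that
 * (inflated_addr & 0x1fffff) == 0x100000, adding a further 2MB whenever
 * the slide would otherwise move before the start of the area returned.
 */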
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	/*
	 * What serializes the accesses to info->flags?
	 * ipc_lock_object() when called from shmctl_do_lock(),
	 * no serialization needed when called from shm_destroy().
	 */
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shmem_inode_info *info = SHMEM_I(file_inode(file));
	int ret;

	ret = seal_check_future_write(info->seals, vma);
	if (ret)
		return ret;

	/* arm64 - allow memory tagging on RAM-based files */
	vma->vm_flags |= VM_MTE_ALLOWED;

	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}
	return 0;
}
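
/*
 * The THP test above registers the mapping with khugepaged only when the
 * vma spans at least one naturally aligned huge-page range: vm_start
 * rounded up to a PMD boundary must still lie below vm_end rounded down,
 * otherwise collapsing would have nothing to work on.
 */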
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (shmem_reserve_inode(sb, &ino))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = ino;
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_generation = prandom_u32();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		atomic_set(&info->stop_eviction, 0);
		info->seals = F_SEAL_SEAL;
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->shrinklist);
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}

		lockdep_annotate_inode_mutex_key(inode);
	} else
		shmem_free_inode(sb);
	return inode;
}
bool shmem_mapping(struct address_space *mapping)
{
	return mapping->a_ops == &shmem_aops;
}
#ifdef CONFIG_USERFAULTFD
int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
			   pmd_t *dst_pmd,
			   struct vm_area_struct *dst_vma,
			   unsigned long dst_addr,
			   unsigned long src_addr,
			   bool zeropage,
			   struct page **pagep)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	void *page_kaddr;
	struct page *page;
	int ret;
	pgoff_t max_off;

	if (!shmem_inode_acct_block(inode, 1)) {
		/*
		 * We may have got a page, returned -ENOENT triggering a retry,
		 * and now we find ourselves with -ENOMEM. Release the page, to
		 * avoid a BUG_ON in our caller.
		 */
		if (unlikely(*pagep)) {
			put_page(*pagep);
			*pagep = NULL;
		}
		return -ENOMEM;
	}

	if (!*pagep) {
		ret = -ENOMEM;
		page = shmem_alloc_page(gfp, info, pgoff);
		if (!page)
			goto out_unacct_blocks;

		if (!zeropage) {	/* COPY */
			page_kaddr = kmap_atomic(page);
			ret = copy_from_user(page_kaddr,
					     (const void __user *)src_addr,
					     PAGE_SIZE);
			kunmap_atomic(page_kaddr);

			/* fallback to copy_from_user outside mmap_lock */
			if (unlikely(ret)) {
				*pagep = page;
				ret = -ENOENT;
				/* don't free the page */
				goto out_unacct_blocks;
			}
		} else {		/* ZEROPAGE */
			clear_highpage(page);
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	VM_BUG_ON(PageLocked(page));
	VM_BUG_ON(PageSwapBacked(page));
	__SetPageLocked(page);
	__SetPageSwapBacked(page);
	__SetPageUptodate(page);

	ret = -EFAULT;
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (unlikely(pgoff >= max_off))
		goto out_release;

	ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
				      gfp & GFP_RECLAIM_MASK, dst_mm);
	if (ret)
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, false);
	if (ret)
		goto out_delete_from_cache;

	spin_lock_irq(&info->lock);
	info->alloced++;
	inode->i_blocks += BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);

	SetPageDirty(page);
	unlock_page(page);
	return 0;
out_delete_from_cache:
	delete_from_page_cache(page);
out_release:
	unlock_page(page);
	put_page(page);
out_unacct_blocks:
	shmem_inode_unacct_blocks(inode, 1);
	return ret;
}
#endif /* CONFIG_USERFAULTFD */
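
/*
 * The -ENOENT dance above is the userfaultfd retry protocol: if
 * copy_from_user() faults while mmap_lock is held, the half-filled page
 * is parked in *pagep and -ENOENT returned, so the caller can drop
 * mmap_lock, complete the copy in a context that may fault, and then
 * call back in with the same page.
 */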
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t index = pos >> PAGE_SHIFT;

	/* i_mutex is held by caller */
	if (unlikely(info->seals & (F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
			return -EPERM;
		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
			return -EPERM;
	}

	return shmem_getpage(inode, index, pagep, SGP_WRITE);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		struct page *head = compound_head(page);
		if (PageTransCompound(page)) {
			int i;

			for (i = 0; i < HPAGE_PMD_NR; i++) {
				if (head + i == page)
					continue;
				clear_highpage(head + i);
				flush_dcache_page(head + i);
			}
		}
		if (copied < PAGE_SIZE) {
			unsigned from = pos & (PAGE_SIZE - 1);
			zero_user_segments(page, 0, from,
					   from + copied, PAGE_SIZE);
		}
		SetPageUptodate(head);
	}
	set_page_dirty(page);
	unlock_page(page);
	put_page(page);

	return copied;
}
static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (!iter_is_iovec(to))
		sgp = SGP_CACHE;

	index = *ppos >> PAGE_SHIFT;
	offset = *ppos & ~PAGE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page) {
			if (sgp == SGP_CACHE)
				set_page_dirty(page);
			unlock_page(page);
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_MASK;
			if (nr <= offset) {
				if (page)
					put_page(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			get_page(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, to);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_SHIFT;
		offset &= ~PAGE_MASK;

		put_page(page);
		if (!iov_iter_count(to))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}
/*
 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = find_get_entries(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !xa_is_value(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}
static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	inode_lock(inode);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0 || offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_SHIFT;
		end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	inode_unlock(inode);
	return offset;
}
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			error = -EPERM;
			goto out;
		}

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
		spin_unlock(&inode->i_lock);
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	start = offset >> PAGE_SHIFT;
	end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			if (index > start) {
				shmem_undo_range(inode,
				    (loff_t)start << PAGE_SHIFT,
				    ((loff_t)index << PAGE_SHIFT) - 1, true);
			}
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		put_page(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	inode_unlock(inode);
	return error;
}
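
/*
 * The nr_unswapped check in the loop above pairs with shmem_writepage():
 * each time reclaim tries to swap out one of the not-yet-initialized pages
 * fallocated here, nr_unswapped is bumped; once that overtakes nr_falloced
 * we are evidently allocating faster than reclaim can cope, and we give up
 * with -ENOMEM rather than provoke a swapstorm.
 */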
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;

		error = 0;
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
out_iput:
	iput(inode);
	return error;
}
static int
shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     NULL,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		d_tmpfile(dentry, inode);
	}
	return error;
out_iput:
	iput(inode);
	return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	int ret = 0;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 * But if an O_TMPFILE file is linked into the tmpfs, the
	 * first link must skip that, to get the accounting right.
	 */
	if (inode->i_nlink) {
		ret = shmem_reserve_inode(inode->i_sb, NULL);
		if (ret)
			goto out;
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}
static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	bool old_is_dir = d_is_dir(old_dentry);
	bool new_is_dir = d_is_dir(new_dentry);

	if (old_dir != new_dir && old_is_dir != new_is_dir) {
		if (old_is_dir) {
			drop_nlink(old_dir);
			inc_nlink(new_dir);
		} else {
			drop_nlink(new_dir);
			inc_nlink(old_dir);
		}
	}
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	d_inode(old_dentry)->i_ctime =
	d_inode(new_dentry)->i_ctime = current_time(old_dir);

	return 0;
}

static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
{
	struct dentry *whiteout;
	int error;

	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
	if (!whiteout)
		return -ENOMEM;

	error = shmem_mknod(old_dir, whiteout,
			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
	dput(whiteout);
	if (error)
		return error;

	/*
	 * Cheat and hash the whiteout while the old dentry is still in
	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
	 *
	 * d_lookup() will consistently find one of them at this point,
	 * not sure which one, but that isn't even important.
	 */
	d_rehash(whiteout);
	return 0;
}
/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry,
			 unsigned int flags)
{
	struct inode *inode = d_inode(old_dentry);
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (flags & RENAME_WHITEOUT) {
		int error;

		error = shmem_whiteout(old_dir, old_dentry);
		if (error)
			return error;
	}

	if (d_really_is_positive(new_dentry)) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = current_time(old_dir);
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;

	len = strlen(symname) + 1;
	if (len > PAGE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0,
				VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error && error != -EOPNOTSUPP) {
		iput(inode);
		return error;
	}

	inode->i_size = len - 1;
	if (len <= SHORT_SYMLINK_LEN) {
		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
		if (!inode->i_link) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		inode_nohighmem(inode);
		error = shmem_getpage(inode, 0, &page, SGP_WRITE);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		memcpy(page_address(page), symname, len);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		put_page(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = current_time(dir);
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
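
/*
 * Delayed-call callback paired with shmem_get_link() below: release the
 * page reference taken while the link target was in use.
 */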
static void shmem_put_link(void *arg)
{
	mark_page_accessed(arg);
	put_page(arg);
}

static const char *shmem_get_link(struct dentry *dentry,
				  struct inode *inode,
				  struct delayed_call *done)
{
	struct page *page = NULL;
	int error;

	if (!dentry) {
		page = find_get_page(inode->i_mapping, 0);
		if (!page)
			return ERR_PTR(-ECHILD);
		if (!PageUptodate(page)) {
			put_page(page);
			return ERR_PTR(-ECHILD);
		}
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_READ);
		if (error)
			return ERR_PTR(error);
		unlock_page(page);
	}
	set_delayed_call(done, shmem_put_link, page);
	return page_address(page);
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kvfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static int shmem_xattr_handler_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size,
				   int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_xattr_handler_set(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, const void *value,
				   size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
}

static const struct xattr_handler shmem_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&shmem_security_xattr_handler,
	&shmem_trusted_xattr_handler,
	NULL
};

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));

	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.get_link	= simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};
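
/*
 * NFS export support. tmpfs file handles carry no parent information,
 * so disconnected dentries cannot be reconnected: get_parent just fails.
 */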
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

/* Find any alias of inode, but prefer a hashed alias */
static struct dentry *shmem_find_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	return alias ?: d_find_any_alias(inode);
}
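
/*
 * Decode the 3-word handle laid out by shmem_encode_fh() below:
 * fh[0] is i_generation, fh[1] and fh[2] the low and high halves of i_ino.
 */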
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = shmem_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

enum shmem_param {
	Opt_gid,
	Opt_huge,
	Opt_mode,
	Opt_mpol,
	Opt_nr_blocks,
	Opt_nr_inodes,
	Opt_size,
	Opt_uid,
	Opt_inode32,
	Opt_inode64,
};

static const struct constant_table shmem_param_enums_huge[] = {
	{"never",	SHMEM_HUGE_NEVER },
	{"always",	SHMEM_HUGE_ALWAYS },
	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
	{"advise",	SHMEM_HUGE_ADVISE },
	{}
};

const struct fs_parameter_spec shmem_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("mpol",		Opt_mpol),
	fsparam_string("nr_blocks",	Opt_nr_blocks),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	fsparam_flag  ("inode32",	Opt_inode32),
	fsparam_flag  ("inode64",	Opt_inode64),
	{}
};
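
/*
 * Illustrative mount command exercising these parameters (the values are
 * examples only, not defaults):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=1m,mode=1777,huge=within_size \
 *		tmpfs /mnt/tmp
 *
 * "size" and "nr_inodes" accept k/m/g suffixes via memparse(), and "size"
 * additionally accepts a trailing '%' of totalram, as handled in
 * shmem_parse_one() below.
 */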
static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
{
	struct shmem_options *ctx = fc->fs_private;
	struct fs_parse_result result;
	unsigned long long size;
	char *rest;
	int opt;

	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_size:
		size = memparse(param->string, &rest);
		if (*rest == '%') {
			size <<= PAGE_SHIFT;
			size *= totalram_pages();
			do_div(size, 100);
			rest++;
		}
		if (*rest)
			goto bad_value;
		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_blocks:
		ctx->blocks = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_inodes:
		ctx->inodes = memparse(param->string, &rest);
		if (*rest)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_INODES;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & 07777;
		break;
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_value;
		break;
	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_value;
		break;
	case Opt_huge:
		ctx->huge = result.uint_32;
		if (ctx->huge != SHMEM_HUGE_NEVER &&
		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		      has_transparent_hugepage()))
			goto unsupported_parameter;
		ctx->seen |= SHMEM_SEEN_HUGE;
		break;
	case Opt_mpol:
		if (IS_ENABLED(CONFIG_NUMA)) {
			mpol_put(ctx->mpol);
			ctx->mpol = NULL;
			if (mpol_parse_str(param->string, &ctx->mpol))
				goto bad_value;
			break;
		}
		goto unsupported_parameter;
	case Opt_inode32:
		ctx->full_inums = false;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	case Opt_inode64:
		if (sizeof(ino_t) < 8) {
			return invalfc(fc,
				       "Cannot use inode64 with <64bit inums in kernel\n");
		}
		ctx->full_inums = true;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	}
	return 0;

unsupported_parameter:
	return invalfc(fc, "Unsupported parameter '%s'", param->key);
bad_value:
	return invalfc(fc, "Bad value for '%s'", param->key);
}
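
/*
 * Split the legacy monolithic option string on commas and feed each
 * "key=value" pair to the parser above, taking care not to split inside
 * an mpol nodelist (see the comment in the loop).
 */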
static int shmem_parse_options(struct fs_context *fc, void *data)
{
	char *options = data;

	if (options) {
		int err = security_sb_eat_lsm_opts(options, &fc->security);
		if (err)
			return err;
	}

	while (options != NULL) {
		char *this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char) {
			char *value = strchr(this_char, '=');
			size_t len = 0;
			int err;

			if (value) {
				*value++ = '\0';
				len = strlen(value);
			}
			err = vfs_parse_fs_string(fc, this_char, value, len);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

/*
 * Reconfigure a shmem filesystem.
 *
 * Note that we disallow change from limited->unlimited blocks/inodes while any
 * are in use; but we must separately disallow unlimited->limited, because in
 * that case we have no record of how much is already in use.
 */
static int shmem_reconfigure(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
	unsigned long inodes;
	const char *err;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
		if (!sbinfo->max_blocks) {
			err = "Cannot retroactively limit size";
			goto out;
		}
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   ctx->blocks) > 0) {
			err = "Too small a size for current use";
			goto out;
		}
	}
	if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) {
		if (!sbinfo->max_inodes) {
			err = "Cannot retroactively limit inodes";
			goto out;
		}
		if (ctx->inodes < inodes) {
			err = "Too few inodes for current use";
			goto out;
		}
	}

	if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums &&
	    sbinfo->next_ino > UINT_MAX) {
		err = "Current inum too high to switch to 32-bit inums";
		goto out;
	}

	if (ctx->seen & SHMEM_SEEN_HUGE)
		sbinfo->huge = ctx->huge;
	if (ctx->seen & SHMEM_SEEN_INUMS)
		sbinfo->full_inums = ctx->full_inums;
	if (ctx->seen & SHMEM_SEEN_BLOCKS)
		sbinfo->max_blocks = ctx->blocks;
	if (ctx->seen & SHMEM_SEEN_INODES) {
		sbinfo->max_inodes = ctx->inodes;
		sbinfo->free_inodes = ctx->inodes - inodes;
	}

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (ctx->mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = ctx->mpol;	/* transfers initial ref */
		ctx->mpol = NULL;
	}
	spin_unlock(&sbinfo->stat_lock);
	return 0;
out:
	spin_unlock(&sbinfo->stat_lock);
	return invalfc(fc, "%s", err);
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (0777 | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
				from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
				from_kgid_munged(&init_user_ns, sbinfo->gid));

	/*
	 * Showing inode{64,32} might be useful even if it's the system default,
	 * since then people don't have to resort to checking both here and
	 * /proc/config.gz to confirm 64-bit inums were successfully applied
	 * (which may not even exist if IKCONFIG_PROC isn't enabled).
	 *
	 * We hide it when inode64 isn't the default and we are using 32-bit
	 * inodes, since that probably just means the feature isn't even under
	 * consideration.
	 *
	 * As such:
	 *
	 *                     +-----------------+-----------------+
	 *                     | TMPFS_INODE64=y | TMPFS_INODE64=n |
	 *  +------------------+-----------------+-----------------+
	 *  | full_inums=true  | show            | show            |
	 *  | full_inums=false | show            | hide            |
	 *  +------------------+-----------------+-----------------+
	 */
	if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums)
		seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 64 : 32));
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
	if (sbinfo->huge)
		seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
#endif
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	free_percpu(sbinfo->ino_batch);
	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
			ctx->blocks = shmem_default_max_blocks();
		if (!(ctx->seen & SHMEM_SEEN_INODES))
			ctx->inodes = shmem_default_max_inodes();
		if (!(ctx->seen & SHMEM_SEEN_INUMS))
			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC;
#else
	sb->s_flags |= SB_NOUSER;
#endif
	sbinfo->max_blocks = ctx->blocks;
	sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes;
	if (sb->s_flags & SB_KERNMOUNT) {
		sbinfo->ino_batch = alloc_percpu(ino_t);
		if (!sbinfo->ino_batch)
			goto failed;
	}
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->full_inums = ctx->full_inums;
	sbinfo->mode = ctx->mode;
	sbinfo->huge = ctx->huge;
	sbinfo->mpol = ctx->mpol;
	ctx->mpol = NULL;

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	uuid_gen(&sb->s_uuid);

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static int shmem_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;

	if (ctx) {
		mpol_put(ctx->mpol);
		kfree(ctx);
	}
}

static const struct fs_context_operations shmem_fs_context_ops = {
	.free			= shmem_free_fc,
	.get_tree		= shmem_get_tree,
#ifdef CONFIG_TMPFS
	.parse_monolithic	= shmem_parse_options,
	.parse_param		= shmem_parse_one,
	.reconfigure		= shmem_reconfigure,
#endif
};

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_free_in_core_inode(struct inode *inode)
{
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static void shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	.allow_speculation = filemap_allow_speculation,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
	struct shmem_options *ctx;

	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mode = 0777 | S_ISVTX;
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();

	fc->fs_private = ctx;
	fc->ops = &shmem_fs_context_ops;
	return 0;
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
	.parameters	= shmem_fs_parameters,
#endif
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT | FS_THP_SUPPORT,
};

int __init shmem_init(void)
{
	int error;

	shmem_init_inodecache();

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = 0; /* just in case it was patched */
#endif
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
	return error;
}
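
/*
 * Sysfs knob backing shmem_enabled_attr below (registered elsewhere,
 * typically as /sys/kernel/mm/transparent_hugepage/shmem_enabled):
 * reading lists all settings with the active one bracketed, writing
 * selects a new one.
 */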
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	static const int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int i, count;

	for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
		const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";

		count += sprintf(buf + count, fmt,
				shmem_format_huge(values[i]));
	}
	buf[count - 1] = '\n';
	return count;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
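/*
 * Decide whether a fault on this tmpfs VMA may be satisfied with a huge
 * page: the global shmem_huge force/deny setting overrides the per-mount
 * "huge=" setting.
 */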
bool shmem_huge_enabled(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	loff_t i_size;
	pgoff_t off;

	if (!transhuge_vma_enabled(vma, vma->vm_flags))
		return false;
	if (shmem_huge == SHMEM_HUGE_FORCE)
		return true;
	if (shmem_huge == SHMEM_HUGE_DENY)
		return false;
	switch (sbinfo->huge) {
	case SHMEM_HUGE_NEVER:
		return false;
	case SHMEM_HUGE_ALWAYS:
		return true;
	case SHMEM_HUGE_WITHIN_SIZE:
		off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
		i_size = round_up(i_size_read(inode), PAGE_SIZE);
		if (i_size >= HPAGE_PMD_SIZE &&
				i_size >> PAGE_SHIFT >= off)
			return true;
		fallthrough;
	case SHMEM_HUGE_ADVISE:
		/* TODO: implement fadvise() hints */
		return (vma->vm_flags & VM_HUGEPAGE);
	default:
		VM_BUG_ON(1);
		return false;
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(unsigned int type, bool frontswap,
		unsigned long *fs_pages_to_unuse)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	inode = shmem_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0,
				flags);
	if (unlikely(!inode)) {
		shmem_unacct_size(flags, size);
		return ERR_PTR(-ENOSPC);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal. There will be NO LSM permission checks against the
 *	underlying inode. So users of this interface must do LSM checks at a
 *	higher layer. The users are the big_key and shm implementations. LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
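
/*
 * Illustrative (hypothetical) in-kernel caller, sketching typical use of
 * the helper above; "my object" and obj_size are placeholders:
 *
 *	struct file *file;
 *
 *	file = shmem_file_setup("my object", obj_size, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	... use it as a backing store, fput() when done ...
 */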

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
			(vma->vm_end & HPAGE_PMD_MASK)) {
		khugepaged_enter(vma, vma->vm_flags);
	}

	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
				  gfp, NULL, NULL, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
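
/*
 * Illustrative (hypothetical) driver-style caller, after the i915 pattern
 * the kernel-doc above describes; mapping and i are placeholders:
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping);
 *
 *	gfp |= __GFP_NORETRY | __GFP_NOWARN;
 *	page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */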

void shmem_mark_page_lazyfree(struct page *page, bool tail)
{
	mark_page_lazyfree_movetail(page, tail);
}
EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);
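
/*
 * Walk a shmem mapping's page cache, isolate resident pages from the LRU,
 * and hand them to reclaim_pages_from_list(). Returns the number of pages
 * reclaimed, -EINVAL if @mapping is not shmem-backed, or 0 when built
 * without CONFIG_SHMEM.
 */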
int reclaim_shmem_address_space(struct address_space *mapping)
{
#ifdef CONFIG_SHMEM
	pgoff_t start = 0;
	struct page *page;
	LIST_HEAD(page_list);
	int reclaimed;
	XA_STATE(xas, &mapping->i_pages, start);

	if (!shmem_mapping(mapping))
		return -EINVAL;

	lru_add_drain();
	rcu_read_lock();
	xas_for_each(&xas, page, ULONG_MAX) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			continue;
		if (isolate_lru_page(page))
			continue;

		list_add(&page->lru, &page_list);
		inc_node_page_state(page, NR_ISOLATED_ANON +
				page_is_file_lru(page));

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
	reclaimed = reclaim_pages_from_list(&page_list);

	return reclaimed;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(reclaim_shmem_address_space);