xfs_ioctl.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_fsops.h"
#include "xfs_discard.h"
#include "xfs_quota.h"
#include "xfs_export.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_acl.h"
#include "xfs_btree.h"
#include <linux/fsmap.h>
#include "xfs_fsmap.h"
#include "scrub/xfs_scrub.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_reflink.h"
#include "xfs_ioctl.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"

#include <linux/mount.h>
#include <linux/namei.h>

/*
 * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
 * a file or fs handle.
 *
 * XFS_IOC_PATH_TO_FSHANDLE
 *    returns fs handle for a mount point or path within that mount point
 * XFS_IOC_FD_TO_HANDLE
 *    returns full handle for a FD opened in user space
 * XFS_IOC_PATH_TO_HANDLE
 *    returns full handle for a path
 */
int
xfs_find_handle(
	unsigned int		cmd,
	xfs_fsop_handlereq_t	*hreq)
{
	int			hsize;
	xfs_handle_t		handle;
	struct inode		*inode;
	struct fd		f = {NULL};
	struct path		path;
	int			error;
	struct xfs_inode	*ip;

	if (cmd == XFS_IOC_FD_TO_HANDLE) {
		f = fdget(hreq->fd);
		if (!f.file)
			return -EBADF;
		inode = file_inode(f.file);
	} else {
		error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
		if (error)
			return error;
		inode = d_inode(path.dentry);
	}
	ip = XFS_I(inode);

	/*
	 * We can only generate handles for inodes residing on a XFS filesystem,
	 * and only for regular files, directories or symbolic links.
	 */
	error = -EINVAL;
	if (inode->i_sb->s_magic != XFS_SB_MAGIC)
		goto out_put;

	error = -EBADF;
	if (!S_ISREG(inode->i_mode) &&
	    !S_ISDIR(inode->i_mode) &&
	    !S_ISLNK(inode->i_mode))
		goto out_put;

	memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t));

	if (cmd == XFS_IOC_PATH_TO_FSHANDLE) {
		/*
		 * This handle only contains an fsid, zero the rest.
		 */
		memset(&handle.ha_fid, 0, sizeof(handle.ha_fid));
		hsize = sizeof(xfs_fsid_t);
	} else {
		handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
					sizeof(handle.ha_fid.fid_len);
		handle.ha_fid.fid_pad = 0;
		handle.ha_fid.fid_gen = inode->i_generation;
		handle.ha_fid.fid_ino = ip->i_ino;
		hsize = sizeof(xfs_handle_t);
	}

	error = -EFAULT;
	if (copy_to_user(hreq->ohandle, &handle, hsize) ||
	    copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
		goto out_put;

	error = 0;

 out_put:
	if (cmd == XFS_IOC_FD_TO_HANDLE)
		fdput(f);
	else
		path_put(&path);
	return error;
}
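
/*
 * Illustrative userspace-side sketch (not part of this file): asking for a
 * filesystem handle with XFS_IOC_PATH_TO_FSHANDLE.  The ioctl can be issued
 * on any file descriptor belonging to the target filesystem; "fd" and the
 * path below are placeholders and error handling is omitted.
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *	xfs_handle_t		handle;
 *	__s32			hlen;
 *
 *	hreq.path = "/mnt/xfs";
 *	hreq.ohandle = &handle;
 *	hreq.ohandlen = &hlen;
 *	ioctl(fd, XFS_IOC_PATH_TO_FSHANDLE, &hreq);
 */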

/*
 * No need to do permission checks on the various pathname components
 * as the handle operations are privileged.
 */
STATIC int
xfs_handle_acceptable(
	void			*context,
	struct dentry		*dentry)
{
	return 1;
}

/*
 * Convert userspace handle data into a dentry.
 */
struct dentry *
xfs_handle_to_dentry(
	struct file		*parfilp,
	void __user		*uhandle,
	u32			hlen)
{
	xfs_handle_t		handle;
	struct xfs_fid64	fid;

	/*
	 * Only allow handle opens under a directory.
	 */
	if (!S_ISDIR(file_inode(parfilp)->i_mode))
		return ERR_PTR(-ENOTDIR);

	if (hlen != sizeof(xfs_handle_t))
		return ERR_PTR(-EINVAL);
	if (copy_from_user(&handle, uhandle, hlen))
		return ERR_PTR(-EFAULT);
	if (handle.ha_fid.fid_len !=
	    sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
		return ERR_PTR(-EINVAL);

	memset(&fid, 0, sizeof(struct fid));
	fid.ino = handle.ha_fid.fid_ino;
	fid.gen = handle.ha_fid.fid_gen;

	return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
			FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
			xfs_handle_acceptable, NULL);
}

STATIC struct dentry *
xfs_handlereq_to_dentry(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}

int
xfs_open_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	const struct cred	*cred = current_cred();
	int			error;
	int			fd;
	int			permflag;
	struct file		*filp;
	struct inode		*inode;
	struct dentry		*dentry;
	fmode_t			fmode;
	struct path		path;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	inode = d_inode(dentry);

	/* Restrict xfs_open_by_handle to directories & regular files. */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
		error = -EPERM;
		goto out_dput;
	}

#if BITS_PER_LONG != 32
	hreq->oflags |= O_LARGEFILE;
#endif

	permflag = hreq->oflags;
	fmode = OPEN_FMODE(permflag);
	if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
	    (fmode & FMODE_WRITE) && IS_APPEND(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
		error = -EPERM;
		goto out_dput;
	}

	/* Can't write directories. */
	if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) {
		error = -EISDIR;
		goto out_dput;
	}

	fd = get_unused_fd_flags(0);
	if (fd < 0) {
		error = fd;
		goto out_dput;
	}

	path.mnt = parfilp->f_path.mnt;
	path.dentry = dentry;
	filp = dentry_open(&path, hreq->oflags, cred);
	dput(dentry);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);
		return PTR_ERR(filp);
	}

	if (S_ISREG(inode->i_mode)) {
		filp->f_flags |= O_NOATIME;
		filp->f_mode |= FMODE_NOCMTIME;
	}

	fd_install(fd, filp);
	return fd;

 out_dput:
	dput(dentry);
	return error;
}
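
/*
 * Illustrative userspace-side sketch (not part of this file): reopening a
 * file from a handle previously obtained via XFS_IOC_PATH_TO_HANDLE, using
 * XFS_IOC_OPEN_BY_HANDLE.  The ioctl must be issued on a directory fd in the
 * same filesystem (see the S_ISDIR check in xfs_handle_to_dentry) and
 * requires CAP_SYS_ADMIN; on success it returns a new file descriptor.
 *
 *	xfs_fsop_handlereq_t	hreq = { 0 };
 *
 *	hreq.ihandle = &handle;
 *	hreq.ihandlen = sizeof(handle);
 *	hreq.oflags = O_RDONLY;
 *	fd = ioctl(dir_fd, XFS_IOC_OPEN_BY_HANDLE, &hreq);
 */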

int
xfs_readlink_by_handle(
	struct file		*parfilp,
	xfs_fsop_handlereq_t	*hreq)
{
	struct dentry		*dentry;
	__u32			olen;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dentry = xfs_handlereq_to_dentry(parfilp, hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* Restrict this handle operation to symlinks only. */
	if (!d_is_symlink(dentry)) {
		error = -EINVAL;
		goto out_dput;
	}

	if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
		error = -EFAULT;
		goto out_dput;
	}

	error = vfs_readlink(dentry, hreq->ohandle, olen);

 out_dput:
	dput(dentry);
	return error;
}

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later,
 * we may be reading them directly out of a user buffer.
 */
static void
xfs_ioc_attr_put_listent(
	struct xfs_attr_list_context *context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	int			valuelen)
{
	struct xfs_attrlist	*alist = context->buffer;
	struct xfs_attrlist_ent	*aep;
	int			arraytop;

	ASSERT(!context->seen_enough);
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (context->attr_filter != (flags & XFS_ATTR_NSP_ONDISK_MASK))
		return;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);

	/* decrement by the actual bytes used by the attr */
	context->firstu -= round_up(offsetof(struct xfs_attrlist_ent, a_name) +
			namelen + 1, sizeof(uint32_t));
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return;
	}

	aep = context->buffer + context->firstu;
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
}

static unsigned int
xfs_attr_filter(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_ROOT)
		return XFS_ATTR_ROOT;
	if (ioc_flags & XFS_IOC_ATTR_SECURE)
		return XFS_ATTR_SECURE;
	return 0;
}

static unsigned int
xfs_attr_flags(
	u32			ioc_flags)
{
	if (ioc_flags & XFS_IOC_ATTR_CREATE)
		return XATTR_CREATE;
	if (ioc_flags & XFS_IOC_ATTR_REPLACE)
		return XATTR_REPLACE;
	return 0;
}

int
xfs_ioc_attr_list(
	struct xfs_inode		*dp,
	void __user			*ubuf,
	int				bufsize,
	int				flags,
	struct xfs_attrlist_cursor __user *ucursor)
{
	struct xfs_attr_list_context	context = { };
	struct xfs_attrlist		*alist;
	void				*buffer;
	int				error;

	if (bufsize < sizeof(struct xfs_attrlist) ||
	    bufsize > XFS_XATTR_LIST_MAX)
		return -EINVAL;

	/*
	 * Reject flags, only allow namespaces.
	 */
	if (flags & ~(XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;
	if (flags == (XFS_IOC_ATTR_ROOT | XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	/*
	 * Validate the cursor.
	 */
	if (copy_from_user(&context.cursor, ucursor, sizeof(context.cursor)))
		return -EFAULT;
	if (context.cursor.pad1 || context.cursor.pad2)
		return -EINVAL;
	if (!context.cursor.initted &&
	    (context.cursor.hashval || context.cursor.blkno ||
	     context.cursor.offset))
		return -EINVAL;

	buffer = kvzalloc(bufsize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Initialize the output buffer.
	 */
	context.dp = dp;
	context.resynch = 1;
	context.attr_filter = xfs_attr_filter(flags);
	context.buffer = buffer;
	context.bufsize = round_down(bufsize, sizeof(uint32_t));
	context.firstu = context.bufsize;
	context.put_listent = xfs_ioc_attr_put_listent;

	alist = context.buffer;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list(&context);
	if (error)
		goto out_free;

	if (copy_to_user(ubuf, buffer, bufsize) ||
	    copy_to_user(ucursor, &context.cursor, sizeof(context.cursor)))
		error = -EFAULT;
out_free:
	kmem_free(buffer);
	return error;
}
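
/*
 * Illustrative sketch (not part of this file): how a consumer walks the
 * buffer that xfs_ioc_attr_list() fills in.  The xfs_attrlist header and the
 * al_offset[] array grow upward from the start of the buffer, while the
 * xfs_attrlist_ent records are packed downward from the end (firstu above).
 *
 *	struct xfs_attrlist	*alist = buffer;
 *	struct xfs_attrlist_ent	*ent;
 *	int			i;
 *
 *	for (i = 0; i < alist->al_count; i++) {
 *		ent = (struct xfs_attrlist_ent *)
 *			((char *)buffer + alist->al_offset[i]);
 *		(ent->a_name is NUL-terminated; ent->a_valuelen is the size)
 *	}
 *
 * If al_more is set, the caller re-issues the request with the updated
 * cursor to fetch the remaining attributes.
 */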

STATIC int
xfs_attrlist_by_handle(
	struct file		*parfilp,
	struct xfs_fsop_attrlist_handlereq __user *p)
{
	struct xfs_fsop_attrlist_handlereq al_hreq;
	struct dentry		*dentry;
	int			error = -ENOMEM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&al_hreq, p, sizeof(al_hreq)))
		return -EFAULT;

	dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = xfs_ioc_attr_list(XFS_I(d_inode(dentry)), al_hreq.buffer,
				  al_hreq.buflen, al_hreq.flags, &p->pos);
	dput(dentry);
	return error;
}

static int
xfs_attrmulti_attr_get(
	struct inode		*inode,
	unsigned char		*name,
	unsigned char __user	*ubuf,
	uint32_t		*len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.attr_flags	= xfs_attr_flags(flags),
		.name		= name,
		.namelen	= strlen(name),
		.valuelen	= *len,
	};
	int			error;

	if (*len > XFS_XATTR_SIZE_MAX)
		return -EINVAL;

	error = xfs_attr_get(&args);
	if (error)
		goto out_kfree;

	*len = args.valuelen;
	if (copy_to_user(ubuf, args.value, args.valuelen))
		error = -EFAULT;

out_kfree:
	kmem_free(args.value);
	return error;
}

static int
xfs_attrmulti_attr_set(
	struct inode		*inode,
	unsigned char		*name,
	const unsigned char __user *ubuf,
	uint32_t		len,
	uint32_t		flags)
{
	struct xfs_da_args	args = {
		.dp		= XFS_I(inode),
		.attr_filter	= xfs_attr_filter(flags),
		.attr_flags	= xfs_attr_flags(flags),
		.name		= name,
		.namelen	= strlen(name),
	};
	int			error;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
		return -EPERM;

	if (ubuf) {
		if (len > XFS_XATTR_SIZE_MAX)
			return -EINVAL;
		args.value = memdup_user(ubuf, len);
		if (IS_ERR(args.value))
			return PTR_ERR(args.value);
		args.valuelen = len;
	}

	error = xfs_attr_set(&args);
	if (!error && (flags & XFS_IOC_ATTR_ROOT))
		xfs_forget_acl(inode, name);
	kfree(args.value);
	return error;
}

int
xfs_ioc_attrmulti_one(
	struct file		*parfilp,
	struct inode		*inode,
	uint32_t		opcode,
	void __user		*uname,
	void __user		*value,
	uint32_t		*len,
	uint32_t		flags)
{
	unsigned char		*name;
	int			error;

	if ((flags & XFS_IOC_ATTR_ROOT) && (flags & XFS_IOC_ATTR_SECURE))
		return -EINVAL;

	name = strndup_user(uname, MAXNAMELEN);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (opcode) {
	case ATTR_OP_GET:
		error = xfs_attrmulti_attr_get(inode, name, value, len, flags);
		break;
	case ATTR_OP_REMOVE:
		value = NULL;
		*len = 0;
		/* fall through */
	case ATTR_OP_SET:
		error = mnt_want_write_file(parfilp);
		if (error)
			break;
		error = xfs_attrmulti_attr_set(inode, name, value, *len, flags);
		mnt_drop_write_file(parfilp);
		break;
	default:
		error = -EINVAL;
		break;
	}

	kfree(name);
	return error;
}

STATIC int
xfs_attrmulti_by_handle(
	struct file		*parfilp,
	void			__user *arg)
{
	int			error;
	xfs_attr_multiop_t	*ops;
	xfs_fsop_attrmulti_handlereq_t am_hreq;
	struct dentry		*dentry;
	unsigned int		i, size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
		return -EFAULT;

	/* overflow check */
	if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t))
		return -E2BIG;

	dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	error = -E2BIG;
	size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
	if (!size || size > 16 * PAGE_SIZE)
		goto out_dput;

	ops = memdup_user(am_hreq.ops, size);
	if (IS_ERR(ops)) {
		error = PTR_ERR(ops);
		goto out_dput;
	}

	error = 0;
	for (i = 0; i < am_hreq.opcount; i++) {
		ops[i].am_error = xfs_ioc_attrmulti_one(parfilp,
				d_inode(dentry), ops[i].am_opcode,
				ops[i].am_attrname, ops[i].am_attrvalue,
				&ops[i].am_length, ops[i].am_flags);
	}

	if (copy_to_user(am_hreq.ops, ops, size))
		error = -EFAULT;

	kfree(ops);
 out_dput:
	dput(dentry);
	return error;
}

int
xfs_ioc_space(
	struct file		*filp,
	xfs_flock64_t		*bf)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct iattr		iattr;
	enum xfs_prealloc_flags	flags = XFS_PREALLOC_CLEAR;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	int			error;

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
		return -EPERM;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (xfs_is_always_cow_inode(ip))
		return -EOPNOTSUPP;

	if (filp->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;
	if (filp->f_mode & FMODE_NOCMTIME)
		flags |= XFS_PREALLOC_INVISIBLE;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;
	inode_dio_wait(inode);

	switch (bf->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		bf->l_start += filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		bf->l_start += XFS_ISIZE(ip);
		break;
	default:
		error = -EINVAL;
		goto out_unlock;
	}

	if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
		error = -EINVAL;
		goto out_unlock;
	}

	if (bf->l_start > XFS_ISIZE(ip)) {
		error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
				bf->l_start - XFS_ISIZE(ip),
				XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	iattr.ia_valid = ATTR_SIZE;
	iattr.ia_size = bf->l_start;
	error = xfs_vn_setattr_size(file_dentry(filp), &iattr);
	if (error)
		goto out_unlock;

	error = xfs_update_prealloc_flags(ip, flags);

out_unlock:
	xfs_iunlock(ip, iolock);
	mnt_drop_write_file(filp);
	return error;
}
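
/*
 * Illustrative userspace-side sketch (not part of this file): growing a file
 * to a fixed size with preallocated blocks via one of the ALLOCSP/FREESP
 * ioctls that xfs_ioc_space() services.  l_whence and l_start follow lseek()
 * conventions; the fd must be open for writing.
 *
 *	xfs_flock64_t	bf = { 0 };
 *
 *	bf.l_whence = SEEK_SET;
 *	bf.l_start = 16 * 1024 * 1024;		(new file size in bytes)
 *	ioctl(fd, XFS_IOC_ALLOCSP64, &bf);
 */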

/* Return 0 on success or positive error */
int
xfs_fsbulkstat_one_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	struct xfs_bstat		bs1;

	xfs_bulkstat_to_bstat(breq->mp, &bs1, bstat);
	if (copy_to_user(breq->ubuffer, &bs1, sizeof(bs1)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bstat));
}

int
xfs_fsinumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	struct xfs_inogrp		ig1;

	xfs_inumbers_to_inogrp(&ig1, igrp);
	if (copy_to_user(breq->ubuffer, &ig1, sizeof(struct xfs_inogrp)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inogrp));
}

STATIC int
xfs_ioc_fsbulkstat(
	xfs_mount_t		*mp,
	unsigned int		cmd,
	void			__user *arg)
{
	struct xfs_fsop_bulkreq	bulkreq;
	struct xfs_ibulk	breq = {
		.mp		= mp,
		.ocount		= 0,
	};
	xfs_ino_t		lastino;
	int			error;

	/* done = 1 if there are more stats to get and if bulkstat */
	/* should be called again (unused here, but used in dmapi) */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&bulkreq, arg, sizeof(struct xfs_fsop_bulkreq)))
		return -EFAULT;

	if (copy_from_user(&lastino, bulkreq.lastip, sizeof(__s64)))
		return -EFAULT;

	if (bulkreq.icount <= 0)
		return -EINVAL;

	if (bulkreq.ubuffer == NULL)
		return -EINVAL;

	breq.ubuffer = bulkreq.ubuffer;
	breq.icount = bulkreq.icount;

	/*
	 * FSBULKSTAT_SINGLE expects that *lastip contains the inode number
	 * that we want to stat.  However, FSINUMBERS and FSBULKSTAT expect
	 * that *lastip contains either zero or the number of the last inode to
	 * be examined by the previous call and return results starting with
	 * the next inode after that.  The new bulk request back end functions
	 * take the inode to start with, so we have to compute the startino
	 * parameter from lastino to maintain correct function.  lastino == 0
	 * is a special case because it has traditionally meant "first inode
	 * in filesystem".
	 */
	if (cmd == XFS_IOC_FSINUMBERS) {
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_inumbers(&breq, xfs_fsinumbers_fmt);
		lastino = breq.startino - 1;
	} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) {
		breq.startino = lastino;
		breq.icount = 1;
		error = xfs_bulkstat_one(&breq, xfs_fsbulkstat_one_fmt);
	} else {	/* XFS_IOC_FSBULKSTAT */
		breq.startino = lastino ? lastino + 1 : 0;
		error = xfs_bulkstat(&breq, xfs_fsbulkstat_one_fmt);
		lastino = breq.startino - 1;
	}

	if (error)
		return error;

	if (bulkreq.lastip != NULL &&
	    copy_to_user(bulkreq.lastip, &lastino, sizeof(xfs_ino_t)))
		return -EFAULT;

	if (bulkreq.ocount != NULL &&
	    copy_to_user(bulkreq.ocount, &breq.ocount, sizeof(__s32)))
		return -EFAULT;

	return 0;
}
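
/*
 * Illustrative userspace-side sketch (not part of this file): the classic
 * XFS_IOC_FSBULKSTAT loop implied by the cursor semantics described above.
 * "last" starts at zero ("first inode in filesystem") and is rewritten by
 * the kernel to the resume cursor after each call; requires CAP_SYS_ADMIN.
 *
 *	struct xfs_bstat	buf[64];
 *	__u64			last = 0;
 *	__s32			count = 0;
 *	struct xfs_fsop_bulkreq	req = {
 *		.lastip		= &last,
 *		.icount		= 64,
 *		.ubuffer	= buf,
 *		.ocount		= &count,
 *	};
 *
 *	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && count > 0)
 *		;	(process "count" entries in buf, then loop)
 */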

/* Return 0 on success or positive error */
static int
xfs_bulkstat_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_bulkstat	*bstat)
{
	if (copy_to_user(breq->ubuffer, bstat, sizeof(struct xfs_bulkstat)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_bulkstat));
}

/*
 * Check the incoming bulk request @hdr from userspace and initialize the
 * internal @breq bulk request appropriately.  Returns 0 if the bulk request
 * should proceed; -ECANCELED if there's nothing to do; or the usual
 * negative error code.
 */
static int
xfs_bulk_ireq_setup(
	struct xfs_mount	*mp,
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq,
	void __user		*ubuffer)
{
	if (hdr->icount == 0 ||
	    (hdr->flags & ~XFS_BULK_IREQ_FLAGS_ALL) ||
	    memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	breq->startino = hdr->ino;
	breq->ubuffer = ubuffer;
	breq->icount = hdr->icount;
	breq->ocount = 0;
	breq->flags = 0;

	/*
	 * The @ino parameter is a special value, so we must look it up here.
	 * We're not allowed to have IREQ_AGNO, and we only return one inode
	 * worth of data.
	 */
	if (hdr->flags & XFS_BULK_IREQ_SPECIAL) {
		if (hdr->flags & XFS_BULK_IREQ_AGNO)
			return -EINVAL;

		switch (hdr->ino) {
		case XFS_BULK_IREQ_SPECIAL_ROOT:
			hdr->ino = mp->m_sb.sb_rootino;
			break;
		default:
			return -EINVAL;
		}
		breq->icount = 1;
	}

	/*
	 * The IREQ_AGNO flag means that we only want results from a given AG.
	 * If @hdr->ino is zero, we start iterating in that AG.  If @hdr->ino is
	 * beyond the specified AG then we return no results.
	 */
	if (hdr->flags & XFS_BULK_IREQ_AGNO) {
		if (hdr->agno >= mp->m_sb.sb_agcount)
			return -EINVAL;

		if (breq->startino == 0)
			breq->startino = XFS_AGINO_TO_INO(mp, hdr->agno, 0);
		else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
			return -EINVAL;

		breq->flags |= XFS_IBULK_SAME_AG;

		/* Asking for an inode past the end of the AG?  We're done! */
		if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
			return -ECANCELED;
	} else if (hdr->agno)
		return -EINVAL;

	/* Asking for an inode past the end of the FS?  We're done! */
	if (XFS_INO_TO_AGNO(mp, breq->startino) >= mp->m_sb.sb_agcount)
		return -ECANCELED;

	return 0;
}

/*
 * Update the userspace bulk request @hdr to reflect the end state of the
 * internal bulk request @breq.
 */
static void
xfs_bulk_ireq_teardown(
	struct xfs_bulk_ireq	*hdr,
	struct xfs_ibulk	*breq)
{
	hdr->ino = breq->startino;
	hdr->ocount = breq->ocount;
}

/* Handle the v5 bulkstat ioctl. */
STATIC int
xfs_ioc_bulkstat(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_bulkstat_req __user	*arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->bulkstat);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_bulkstat(&breq, xfs_bulkstat_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}
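
/*
 * Illustrative userspace-side sketch (not part of this file): a minimal v5
 * XFS_IOC_BULKSTAT call.  hdr.ino is both the starting point and, after the
 * ioctl returns, the cursor for the next call; hdr.ocount reports how many
 * records were filled in.
 *
 *	struct xfs_bulkstat_req	*req;
 *
 *	req = calloc(1, XFS_BULKSTAT_REQ_SIZE(64));
 *	req->hdr.icount = 64;
 *	while (!ioctl(fd, XFS_IOC_BULKSTAT, req) && req->hdr.ocount > 0)
 *		;	(req->bulkstat[0..ocount-1] hold the results)
 */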

STATIC int
xfs_inumbers_fmt(
	struct xfs_ibulk		*breq,
	const struct xfs_inumbers	*igrp)
{
	if (copy_to_user(breq->ubuffer, igrp, sizeof(struct xfs_inumbers)))
		return -EFAULT;
	return xfs_ibulk_advance(breq, sizeof(struct xfs_inumbers));
}

/* Handle the v5 inumbers ioctl. */
STATIC int
xfs_ioc_inumbers(
	struct xfs_mount		*mp,
	unsigned int			cmd,
	struct xfs_inumbers_req __user	*arg)
{
	struct xfs_bulk_ireq	hdr;
	struct xfs_ibulk	breq = {
		.mp		= mp,
	};
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (copy_from_user(&hdr, &arg->hdr, sizeof(hdr)))
		return -EFAULT;

	error = xfs_bulk_ireq_setup(mp, &hdr, &breq, arg->inumbers);
	if (error == -ECANCELED)
		goto out_teardown;
	if (error < 0)
		return error;

	error = xfs_inumbers(&breq, xfs_inumbers_fmt);
	if (error)
		return error;

out_teardown:
	xfs_bulk_ireq_teardown(&hdr, &breq);
	if (copy_to_user(&arg->hdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return 0;
}

STATIC int
xfs_ioc_fsgeometry(
	struct xfs_mount	*mp,
	void __user		*arg,
	int			struct_version)
{
	struct xfs_fsop_geom	fsgeo;
	size_t			len;

	xfs_fs_geometry(&mp->m_sb, &fsgeo, struct_version);

	if (struct_version <= 3)
		len = sizeof(struct xfs_fsop_geom_v1);
	else if (struct_version == 4)
		len = sizeof(struct xfs_fsop_geom_v4);
	else {
		xfs_fsop_geom_health(mp, &fsgeo);
		len = sizeof(fsgeo);
	}

	if (copy_to_user(arg, &fsgeo, len))
		return -EFAULT;
	return 0;
}
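
/*
 * Illustrative userspace-side sketch (not part of this file): querying the
 * current (v5) geometry structure, handled by xfs_ioc_fsgeometry() above.
 *
 *	struct xfs_fsop_geom	geo;
 *
 *	ioctl(fd, XFS_IOC_FSGEOMETRY, &geo);
 *	(geo.blocksize, geo.agcount, geo.flags, ... describe the filesystem)
 */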

STATIC int
xfs_ioc_ag_geometry(
	struct xfs_mount	*mp,
	void __user		*arg)
{
	struct xfs_ag_geometry	ageo;
	int			error;

	if (copy_from_user(&ageo, arg, sizeof(ageo)))
		return -EFAULT;
	if (ageo.ag_flags)
		return -EINVAL;
	if (memchr_inv(&ageo.ag_reserved, 0, sizeof(ageo.ag_reserved)))
		return -EINVAL;

	error = xfs_ag_get_geometry(mp, ageo.ag_number, &ageo);
	if (error)
		return error;

	if (copy_to_user(arg, &ageo, sizeof(ageo)))
		return -EFAULT;
	return 0;
}
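
/*
 * Illustrative userspace-side sketch (not part of this file): fetching the
 * geometry of a single allocation group.  Only ag_number may be set on
 * input; ag_flags and the reserved fields must be zero, as checked above.
 *
 *	struct xfs_ag_geometry	ageo = { .ag_number = 0 };
 *
 *	ioctl(fd, XFS_IOC_AG_GEOMETRY, &ageo);
 *	(ageo.ag_length, ageo.ag_freeblks, ... describe AG 0)
 */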

/*
 * Linux extended inode flags interface.
 */
STATIC unsigned int
xfs_merge_ioc_xflags(
	unsigned int	flags,
	unsigned int	start)
{
	unsigned int	xflags = start;

	if (flags & FS_IMMUTABLE_FL)
		xflags |= FS_XFLAG_IMMUTABLE;
	else
		xflags &= ~FS_XFLAG_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		xflags |= FS_XFLAG_APPEND;
	else
		xflags &= ~FS_XFLAG_APPEND;
	if (flags & FS_SYNC_FL)
		xflags |= FS_XFLAG_SYNC;
	else
		xflags &= ~FS_XFLAG_SYNC;
	if (flags & FS_NOATIME_FL)
		xflags |= FS_XFLAG_NOATIME;
	else
		xflags &= ~FS_XFLAG_NOATIME;
	if (flags & FS_NODUMP_FL)
		xflags |= FS_XFLAG_NODUMP;
	else
		xflags &= ~FS_XFLAG_NODUMP;
	if (flags & FS_DAX_FL)
		xflags |= FS_XFLAG_DAX;
	else
		xflags &= ~FS_XFLAG_DAX;

	return xflags;
}

STATIC unsigned int
xfs_di2lxflags(
	uint16_t	di_flags,
	uint64_t	di_flags2)
{
	unsigned int	flags = 0;

	if (di_flags & XFS_DIFLAG_IMMUTABLE)
		flags |= FS_IMMUTABLE_FL;
	if (di_flags & XFS_DIFLAG_APPEND)
		flags |= FS_APPEND_FL;
	if (di_flags & XFS_DIFLAG_SYNC)
		flags |= FS_SYNC_FL;
	if (di_flags & XFS_DIFLAG_NOATIME)
		flags |= FS_NOATIME_FL;
	if (di_flags & XFS_DIFLAG_NODUMP)
		flags |= FS_NODUMP_FL;
	if (di_flags2 & XFS_DIFLAG2_DAX) {
		flags |= FS_DAX_FL;
	}
	return flags;
}

static void
xfs_fill_fsxattr(
	struct xfs_inode	*ip,
	bool			attr,
	struct fsxattr		*fa)
{
	struct xfs_ifork	*ifp = attr ? ip->i_afp : &ip->i_df;

	simple_fill_fsxattr(fa, xfs_ip2xflags(ip));
	fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
	fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
			ip->i_mount->m_sb.sb_blocklog;
	fa->fsx_projid = ip->i_d.di_projid;
	if (ifp && (ifp->if_flags & XFS_IFEXTENTS))
		fa->fsx_nextents = xfs_iext_count(ifp);
	else
		fa->fsx_nextents = xfs_ifork_nextents(ifp);
}

STATIC int
xfs_ioc_fsgetxattr(
	xfs_inode_t		*ip,
	int			attr,
	void			__user *arg)
{
	struct fsxattr		fa;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_fill_fsxattr(ip, attr, &fa);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (copy_to_user(arg, &fa, sizeof(fa)))
		return -EFAULT;
	return 0;
}

STATIC uint16_t
xfs_flags2diflags(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	/* can't set PREALLOC this way, just preserve it */
	uint16_t		di_flags =
		(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);

	if (xflags & FS_XFLAG_IMMUTABLE)
		di_flags |= XFS_DIFLAG_IMMUTABLE;
	if (xflags & FS_XFLAG_APPEND)
		di_flags |= XFS_DIFLAG_APPEND;
	if (xflags & FS_XFLAG_SYNC)
		di_flags |= XFS_DIFLAG_SYNC;
	if (xflags & FS_XFLAG_NOATIME)
		di_flags |= XFS_DIFLAG_NOATIME;
	if (xflags & FS_XFLAG_NODUMP)
		di_flags |= XFS_DIFLAG_NODUMP;
	if (xflags & FS_XFLAG_NODEFRAG)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (xflags & FS_XFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;
	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (xflags & FS_XFLAG_NOSYMLINKS)
			di_flags |= XFS_DIFLAG_NOSYMLINKS;
		if (xflags & FS_XFLAG_EXTSZINHERIT)
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
		if (xflags & FS_XFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
		if (xflags & FS_XFLAG_REALTIME)
			di_flags |= XFS_DIFLAG_REALTIME;
		if (xflags & FS_XFLAG_EXTSIZE)
			di_flags |= XFS_DIFLAG_EXTSIZE;
	}

	return di_flags;
}

STATIC uint64_t
xfs_flags2diflags2(
	struct xfs_inode	*ip,
	unsigned int		xflags)
{
	uint64_t		di_flags2 =
		(ip->i_d.di_flags2 & (XFS_DIFLAG2_REFLINK |
				      XFS_DIFLAG2_BIGTIME));

	if (xflags & FS_XFLAG_DAX)
		di_flags2 |= XFS_DIFLAG2_DAX;
	if (xflags & FS_XFLAG_COWEXTSIZE)
		di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;

	return di_flags2;
}

static int
xfs_ioctl_setattr_xflags(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		di_flags2;

	/* Can't change realtime flag if any extents are allocated. */
	if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
	    XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
		return -EINVAL;

	/* If realtime flag is set then must have realtime device */
	if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
		if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
		    (ip->i_d.di_extsize % mp->m_sb.sb_rextsize))
			return -EINVAL;
	}

	/* Clear reflink if we are actually able to set the rt flag. */
	if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip))
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;

	/* Don't allow us to set DAX mode for a reflinked file for now. */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip))
		return -EINVAL;

	/* diflags2 only valid for v3 inodes. */
	di_flags2 = xfs_flags2diflags2(ip, fa->fsx_xflags);
	if (di_flags2 && !xfs_sb_version_has_v3inode(&mp->m_sb))
		return -EINVAL;

	ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
	ip->i_d.di_flags2 = di_flags2;

	xfs_diflags_to_iflags(ip, false);
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	XFS_STATS_INC(mp, xs_ig_attrchg);
	return 0;
}

static void
xfs_ioctl_setattr_prepare_dax(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);

	if (S_ISDIR(inode->i_mode))
		return;

	if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) ||
	    (mp->m_flags & XFS_MOUNT_DAX_NEVER))
		return;

	if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
	    !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) ||
	    (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
	     (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)))
		d_mark_dontcache(inode);
}

/*
 * Set up the transaction structure for the setattr operation, checking that we
 * have permission to do so. On success, return a clean transaction and the
 * inode locked exclusively ready for further operation specific checks. On
 * failure, return an error without modifying or locking the inode.
 */
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = -EROFS;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		goto out_unlock;
	error = -EIO;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	/*
	 * CAP_FOWNER overrides the following restrictions:
	 *
	 * The user ID of the calling process must be equal to the file owner
	 * ID, except in cases where the CAP_FSETID capability is applicable.
	 */
	if (!inode_owner_or_capable(VFS_I(ip))) {
		error = -EPERM;
		goto out_cancel;
	}

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	return tp;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	return ERR_PTR(error);
}

/*
 * extent size hint validation is somewhat cumbersome. Rules are:
 *
 * 1. extent size hint is only valid for directories and regular files
 * 2. FS_XFLAG_EXTSIZE is only valid for regular files
 * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories.
 * 4. can only be changed on regular files if no extents are allocated
 * 5. can be changed on directories at any time
 * 6. extsize hint of 0 turns off hints, clears inode flags.
 * 7. Extent size must be a multiple of the appropriate block size.
 * 8. for non-realtime files, the extent size hint must be limited
 *    to half the AG size to avoid alignment extending the extent beyond the
 *    limits of the AG.
 *
 * Please keep this function in sync with xfs_scrub_inode_extsize.
 */
static int
xfs_ioctl_setattr_check_extsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		size;
	xfs_fsblock_t		extsize_fsb;

	if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
	    ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
		return -EINVAL;

	if (fa->fsx_extsize == 0)
		return 0;

	extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
	if (extsize_fsb > MAXEXTLEN)
		return -EINVAL;

	if (XFS_IS_REALTIME_INODE(ip) ||
	    (fa->fsx_xflags & FS_XFLAG_REALTIME)) {
		size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
	} else {
		size = mp->m_sb.sb_blocksize;
		if (extsize_fsb > mp->m_sb.sb_agblocks / 2)
			return -EINVAL;
	}

	if (fa->fsx_extsize % size)
		return -EINVAL;

	return 0;
}
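
/*
 * Worked example of the rules above (illustrative, not from the original
 * source): on a filesystem with 4096-byte blocks, fsx_extsize = 1048576
 * (1 MiB = 256 blocks) passes the multiple-of-block-size check, while
 * fsx_extsize = 6000 fails the "fa->fsx_extsize % size" test.  For a
 * realtime file with sb_rextsize = 16 blocks, the hint must instead be a
 * multiple of 16 * 4096 = 65536 bytes.
 */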

/*
 * CoW extent size hint validation rules are:
 *
 * 1. CoW extent size hint can only be set if reflink is enabled on the fs.
 *    The inode does not have to have any shared blocks, but it must be a v3.
 * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files;
 *    for a directory, the hint is propagated to new files.
 * 3. Can be changed on files & directories at any time.
 * 4. CoW extsize hint of 0 turns off hints, clears inode flags.
 * 5. Extent size must be a multiple of the appropriate block size.
 * 6. The extent size hint must be limited to half the AG size to avoid
 *    alignment extending the extent beyond the limits of the AG.
 *
 * Please keep this function in sync with xfs_scrub_inode_cowextsize.
 */
static int
xfs_ioctl_setattr_check_cowextsize(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		size;
	xfs_fsblock_t		cowextsize_fsb;

	if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE))
		return 0;

	if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb))
		return -EINVAL;

	if (fa->fsx_cowextsize == 0)
		return 0;

	cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize);
	if (cowextsize_fsb > MAXEXTLEN)
		return -EINVAL;

	size = mp->m_sb.sb_blocksize;
	if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2)
		return -EINVAL;

	if (fa->fsx_cowextsize % size)
		return -EINVAL;

	return 0;
}

static int
xfs_ioctl_setattr_check_projid(
	struct xfs_inode	*ip,
	struct fsxattr		*fa)
{
	/* Disallow 32bit project ids if projid32bit feature is not enabled. */
	if (fa->fsx_projid > (uint16_t)-1 &&
	    !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb))
		return -EINVAL;
	return 0;
}

STATIC int
xfs_ioctl_setattr(
	xfs_inode_t		*ip,
	struct fsxattr		*fa)
{
	struct fsxattr		old_fa;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_dquot	*olddquot = NULL;
	int			code;

	trace_xfs_ioctl_setattr(ip);

	code = xfs_ioctl_setattr_check_projid(ip, fa);
	if (code)
		return code;

	/*
	 * If disk quotas is on, we make sure that the dquots do exist on disk,
	 * before we start any other transactions. Trying to do this later
	 * is messy. We don't care to take a readlock to look at the ids
	 * in inode here, because we can't hold it across the trans_reserve.
	 * If the IDs do change before we take the ilock, we're covered
	 * because the i_*dquot fields will get updated anyway.
	 */
	if (XFS_IS_QUOTA_ON(mp)) {
		code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
				VFS_I(ip)->i_gid, fa->fsx_projid,
				XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
		if (code)
			return code;
	}

	xfs_ioctl_setattr_prepare_dax(ip, fa);

	tp = xfs_ioctl_setattr_get_trans(ip);
	if (IS_ERR(tp)) {
		code = PTR_ERR(tp);
		goto error_free_dquots;
	}

	if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
	    ip->i_d.di_projid != fa->fsx_projid) {
		code = xfs_qm_vop_chown_reserve(tp, ip, NULL, NULL, pdqp,
				capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
		if (code)	/* out of quota */
			goto error_trans_cancel;
	}

	xfs_fill_fsxattr(ip, false, &old_fa);
	code = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_check_extsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_check_cowextsize(ip, fa);
	if (code)
		goto error_trans_cancel;

	code = xfs_ioctl_setattr_xflags(tp, ip, fa);
	if (code)
		goto error_trans_cancel;

	/*
	 * Change file ownership.  Must be the owner or privileged.  CAP_FSETID
	 * overrides the following restrictions:
	 *
	 * The set-user-ID and set-group-ID bits of a file will be cleared upon
	 * successful return from chown()
	 */
	if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
	    !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
		VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);

	/* Change the ownerships and register project quota modifications */
	if (ip->i_d.di_projid != fa->fsx_projid) {
		if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
			olddquot = xfs_qm_vop_chown(tp, ip,
						&ip->i_pdquot, pdqp);
		}
		ip->i_d.di_projid = fa->fsx_projid;
	}

	/*
	 * Only set the extent size hint if we've already determined that the
	 * extent size hint should be set on the inode. If no extent size flags
	 * are set on the inode then unconditionally clear the extent size hint.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_EXTSIZE | XFS_DIFLAG_EXTSZINHERIT))
		ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_extsize = 0;
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		ip->i_d.di_cowextsize = fa->fsx_cowextsize >>
				mp->m_sb.sb_blocklog;
	else
		ip->i_d.di_cowextsize = 0;

	code = xfs_trans_commit(tp);

	/*
	 * Release any dquot(s) the inode had kept before chown.
	 */
	xfs_qm_dqrele(olddquot);
	xfs_qm_dqrele(pdqp);

	return code;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_free_dquots:
	xfs_qm_dqrele(pdqp);
	return code;
}

STATIC int
xfs_ioc_fssetxattr(
	xfs_inode_t		*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct fsxattr		fa;
	int			error;

	if (copy_from_user(&fa, arg, sizeof(fa)))
		return -EFAULT;

	error = mnt_want_write_file(filp);
	if (error)
		return error;
	error = xfs_ioctl_setattr(ip, &fa);
	mnt_drop_write_file(filp);
	return error;
}
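
/*
 * Illustrative userspace-side sketch (not part of this file): the usual
 * read-modify-write pattern for FSGETXATTR/FSSETXATTR, here changing the
 * project ID that xfs_ioctl_setattr() validates and applies above.
 *
 *	struct fsxattr	fa;
 *
 *	ioctl(fd, XFS_IOC_FSGETXATTR, &fa);
 *	fa.fsx_projid = 42;
 *	ioctl(fd, XFS_IOC_FSSETXATTR, &fa);
 */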

STATIC int
xfs_ioc_getxflags(
	xfs_inode_t		*ip,
	void			__user *arg)
{
	unsigned int		flags;

	flags = xfs_di2lxflags(ip->i_d.di_flags, ip->i_d.di_flags2);
	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

STATIC int
xfs_ioc_setxflags(
	struct xfs_inode	*ip,
	struct file		*filp,
	void			__user *arg)
{
	struct xfs_trans	*tp;
	struct fsxattr		fa;
	struct fsxattr		old_fa;
	unsigned int		flags;
	int			error;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DAX_FL))
		return -EOPNOTSUPP;

	fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	xfs_ioctl_setattr_prepare_dax(ip, &fa);

	tp = xfs_ioctl_setattr_get_trans(ip);
	if (IS_ERR(tp)) {
		error = PTR_ERR(tp);
		goto out_drop_write;
	}

	xfs_fill_fsxattr(ip, false, &old_fa);
	error = vfs_ioc_fssetxattr_check(VFS_I(ip), &old_fa, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_ioctl_setattr_xflags(tp, ip, &fa);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_drop_write;
	}

	error = xfs_trans_commit(tp);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}
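
/*
 * Illustrative userspace-side sketch (not part of this file): the same
 * read-modify-write pattern for the Linux inode flags interface handled by
 * xfs_ioc_getxflags()/xfs_ioc_setxflags(), e.g. marking a file append-only.
 *
 *	unsigned int	flags;
 *
 *	ioctl(fd, XFS_IOC_GETXFLAGS, &flags);
 *	flags |= FS_APPEND_FL;
 *	ioctl(fd, XFS_IOC_SETXFLAGS, &flags);
 */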

static bool
xfs_getbmap_format(
	struct kgetbmap		*p,
	struct getbmapx __user	*u,
	size_t			recsize)
{
	if (put_user(p->bmv_offset, &u->bmv_offset) ||
	    put_user(p->bmv_block, &u->bmv_block) ||
	    put_user(p->bmv_length, &u->bmv_length) ||
	    put_user(0, &u->bmv_count) ||
	    put_user(0, &u->bmv_entries))
		return false;
	if (recsize < sizeof(struct getbmapx))
		return true;
	if (put_user(0, &u->bmv_iflags) ||
	    put_user(p->bmv_oflags, &u->bmv_oflags) ||
	    put_user(0, &u->bmv_unused1) ||
	    put_user(0, &u->bmv_unused2))
		return false;
	return true;
}

STATIC int
xfs_ioc_getbmap(
	struct file		*file,
	unsigned int		cmd,
	void			__user *arg)
{
	struct getbmapx		bmx = { 0 };
	struct kgetbmap		*buf;
	size_t			recsize;
	int			error, i;

	switch (cmd) {
	case XFS_IOC_GETBMAPA:
		bmx.bmv_iflags = BMV_IF_ATTRFORK;
		/*FALLTHRU*/
	case XFS_IOC_GETBMAP:
		if (file->f_mode & FMODE_NOCMTIME)
			bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
		/* struct getbmap is a strict subset of struct getbmapx. */
		recsize = sizeof(struct getbmap);
		break;
	case XFS_IOC_GETBMAPX:
		recsize = sizeof(struct getbmapx);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(&bmx, arg, recsize))
		return -EFAULT;

	if (bmx.bmv_count < 2)
		return -EINVAL;
	if (bmx.bmv_count > ULONG_MAX / recsize)
		return -ENOMEM;

	buf = kvzalloc(bmx.bmv_count * sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, buf);
	if (error)
		goto out_free_buf;

	error = -EFAULT;
	if (copy_to_user(arg, &bmx, recsize))
		goto out_free_buf;
	arg += recsize;

	for (i = 0; i < bmx.bmv_entries; i++) {
		if (!xfs_getbmap_format(buf + i, arg, recsize))
			goto out_free_buf;
		arg += recsize;
	}

	error = 0;
out_free_buf:
	kmem_free(buf);
	return error;
}
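
/*
 * Illustrative userspace-side sketch (not part of this file): mapping the
 * extents of a whole file with XFS_IOC_GETBMAPX.  Element 0 is the header
 * (bmv_count must be at least 2, as checked above); records come back in
 * the following elements.
 *
 *	struct getbmapx	map[33];
 *
 *	memset(map, 0, sizeof(map));
 *	map[0].bmv_length = -1;		(map to end of file)
 *	map[0].bmv_count = 33;		(header plus up to 32 records)
 *	ioctl(fd, XFS_IOC_GETBMAPX, map);
 *	(map[1] .. map[map[0].bmv_entries] now hold extent records)
 */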
STATIC int
xfs_ioc_getfsmap(
	struct xfs_inode	*ip,
	struct fsmap_head	__user *arg)
{
	struct xfs_fsmap_head	xhead = {0};
	struct fsmap_head	head;
	struct fsmap		*recs;
	unsigned int		count;
	__u32			last_flags = 0;
	bool			done = false;
	int			error;

	if (copy_from_user(&head, arg, sizeof(struct fsmap_head)))
		return -EFAULT;
	if (memchr_inv(head.fmh_reserved, 0, sizeof(head.fmh_reserved)) ||
	    memchr_inv(head.fmh_keys[0].fmr_reserved, 0,
		       sizeof(head.fmh_keys[0].fmr_reserved)) ||
	    memchr_inv(head.fmh_keys[1].fmr_reserved, 0,
		       sizeof(head.fmh_keys[1].fmr_reserved)))
		return -EINVAL;

	/*
	 * Use an internal memory buffer so that we don't have to copy fsmap
	 * data to userspace while holding locks.  Start by trying to allocate
	 * up to 128k for the buffer, but fall back to a single page if needed.
	 */
	count = min_t(unsigned int, head.fmh_count,
			131072 / sizeof(struct fsmap));
	recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
	if (!recs) {
		count = min_t(unsigned int, head.fmh_count,
				PAGE_SIZE / sizeof(struct fsmap));
		recs = kvzalloc(count * sizeof(struct fsmap), GFP_KERNEL);
		if (!recs)
			return -ENOMEM;
	}

	xhead.fmh_iflags = head.fmh_iflags;
	xfs_fsmap_to_internal(&xhead.fmh_keys[0], &head.fmh_keys[0]);
	xfs_fsmap_to_internal(&xhead.fmh_keys[1], &head.fmh_keys[1]);

	trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	trace_xfs_getfsmap_high_key(ip->i_mount, &xhead.fmh_keys[1]);

	head.fmh_entries = 0;
	do {
		struct fsmap __user	*user_recs;
		struct fsmap		*last_rec;

		user_recs = &arg->fmh_recs[head.fmh_entries];
		xhead.fmh_entries = 0;
		xhead.fmh_count = min_t(unsigned int, count,
					head.fmh_count - head.fmh_entries);

		/* Run query, record how many entries we got. */
		error = xfs_getfsmap(ip->i_mount, &xhead, recs);
		switch (error) {
		case 0:
			/*
			 * There are no more records in the result set.  Copy
			 * whatever we got to userspace and break out.
			 */
			done = true;
			break;
		case -ECANCELED:
			/*
			 * The internal memory buffer is full.  Copy whatever
			 * records we got to userspace and go again if we have
			 * not yet filled the userspace buffer.
			 */
			error = 0;
			break;
		default:
			goto out_free;
		}
		head.fmh_entries += xhead.fmh_entries;
		head.fmh_oflags = xhead.fmh_oflags;

		/*
		 * If the caller wanted a record count or there aren't any
		 * new records to return, we're done.
		 */
		if (head.fmh_count == 0 || xhead.fmh_entries == 0)
			break;

		/* Copy all the records we got out to userspace. */
		if (copy_to_user(user_recs, recs,
				 xhead.fmh_entries * sizeof(struct fsmap))) {
			error = -EFAULT;
			goto out_free;
		}

		/* Remember the last record flags we copied to userspace. */
		last_rec = &recs[xhead.fmh_entries - 1];
		last_flags = last_rec->fmr_flags;

		/* Set up the low key for the next iteration. */
		xfs_fsmap_to_internal(&xhead.fmh_keys[0], last_rec);
		trace_xfs_getfsmap_low_key(ip->i_mount, &xhead.fmh_keys[0]);
	} while (!done && head.fmh_entries < head.fmh_count);

	/*
	 * If there are no more records in the query result set and we're not
	 * in counting mode, mark the last record returned with the LAST flag.
	 */
	if (done && head.fmh_count > 0 && head.fmh_entries > 0) {
		struct fsmap __user	*user_rec;

		last_flags |= FMR_OF_LAST;
		user_rec = &arg->fmh_recs[head.fmh_entries - 1];
		if (copy_to_user(&user_rec->fmr_flags, &last_flags,
				 sizeof(last_flags))) {
			error = -EFAULT;
			goto out_free;
		}
	}

	/* copy back header */
	if (copy_to_user(arg, &head, sizeof(struct fsmap_head))) {
		error = -EFAULT;
		goto out_free;
	}

out_free:
	kmem_free(recs);
	return error;
}
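
/* Run the online metadata scrubber on behalf of a CAP_SYS_ADMIN caller. */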
STATIC int
xfs_ioc_scrub_metadata(
	struct xfs_inode	*ip,
	void			__user *arg)
{
	struct xfs_scrub_metadata	scrub;
	int				error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&scrub, arg, sizeof(scrub)))
		return -EFAULT;

	error = xfs_scrub_metadata(ip, &scrub);
	if (error)
		return error;

	if (copy_to_user(arg, &scrub, sizeof(scrub)))
		return -EFAULT;

	return 0;
}
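
/*
 * Swap the extent forks of the two files named by sx_fdtarget and sx_fdtmp,
 * after checking that both fds are read/write, non-append, non-swapfile XFS
 * inodes on the same (non-shut-down) filesystem.
 */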
int
xfs_ioc_swapext(
	xfs_swapext_t	*sxp)
{
	xfs_inode_t	*ip, *tip;
	struct fd	f, tmp;
	int		error = 0;

	/* Pull information for the target fd */
	f = fdget((int)sxp->sx_fdtarget);
	if (!f.file) {
		error = -EINVAL;
		goto out;
	}

	if (!(f.file->f_mode & FMODE_WRITE) ||
	    !(f.file->f_mode & FMODE_READ) ||
	    (f.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_file;
	}

	tmp = fdget((int)sxp->sx_fdtmp);
	if (!tmp.file) {
		error = -EINVAL;
		goto out_put_file;
	}

	if (!(tmp.file->f_mode & FMODE_WRITE) ||
	    !(tmp.file->f_mode & FMODE_READ) ||
	    (tmp.file->f_flags & O_APPEND)) {
		error = -EBADF;
		goto out_put_tmp_file;
	}

	if (IS_SWAPFILE(file_inode(f.file)) ||
	    IS_SWAPFILE(file_inode(tmp.file))) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	/*
	 * We need to ensure that the fds passed in point to XFS inodes
	 * before we cast and access them as XFS structures as we have no
	 * control over what the user passes us here.
	 */
	if (f.file->f_op != &xfs_file_operations ||
	    tmp.file->f_op != &xfs_file_operations) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	ip = XFS_I(file_inode(f.file));
	tip = XFS_I(file_inode(tmp.file));

	if (ip->i_mount != tip->i_mount) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (ip->i_ino == tip->i_ino) {
		error = -EINVAL;
		goto out_put_tmp_file;
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto out_put_tmp_file;
	}

	error = xfs_swap_extents(ip, tip, sxp);

 out_put_tmp_file:
	fdput(tmp);
 out_put_file:
	fdput(f);
 out:
	return error;
}
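
/* Copy the filesystem label out of the superblock, NUL-terminated, to userspace. */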
static int
xfs_ioc_getlabel(
	struct xfs_mount	*mp,
	char			__user *user_label)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];

	/* Paranoia */
	BUILD_BUG_ON(sizeof(sbp->sb_fname) > FSLABEL_MAX);

	/* 1 larger than sb_fname, so this ensures a trailing NUL char */
	memset(label, 0, sizeof(label));
	spin_lock(&mp->m_sb_lock);
	strncpy(label, sbp->sb_fname, XFSLABEL_MAX);
	spin_unlock(&mp->m_sb_lock);

	if (copy_to_user(user_label, label, sizeof(label)))
		return -EFAULT;
	return 0;
}

static int
xfs_ioc_setlabel(
	struct file		*filp,
	struct xfs_mount	*mp,
	char			__user *newlabel)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	char			label[XFSLABEL_MAX + 1];
	size_t			len;
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * The generic ioctl allows up to FSLABEL_MAX chars, but XFS is much
	 * smaller, at 12 bytes.  We copy one more to be sure we find the
	 * (required) NULL character to test the incoming label length.
	 * NB: The on disk label doesn't need to be null terminated.
	 */
	if (copy_from_user(label, newlabel, XFSLABEL_MAX + 1))
		return -EFAULT;
	len = strnlen(label, XFSLABEL_MAX + 1);
	if (len > sizeof(sbp->sb_fname))
		return -EINVAL;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	spin_lock(&mp->m_sb_lock);
	memset(sbp->sb_fname, 0, sizeof(sbp->sb_fname));
	memcpy(sbp->sb_fname, label, len);
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Now we do several things to satisfy userspace.
	 * In addition to normal logging of the primary superblock, we also
	 * immediately write these changes to sector zero for the primary, then
	 * update all backup supers (as xfs_db does for a label change), then
	 * invalidate the block device page cache.  This is so that any prior
	 * buffered reads from userspace (i.e. from blkid) are invalidated,
	 * and userspace will see the newly-written label.
	 */
	error = xfs_sync_sb_buf(mp);
	if (error)
		goto out;
	/*
	 * growfs also updates backup supers so lock against that.
	 */
	mutex_lock(&mp->m_growlock);
	error = xfs_update_secondary_sbs(mp);
	mutex_unlock(&mp->m_growlock);

	invalidate_bdev(mp->m_ddev_targp->bt_bdev);

out:
	mnt_drop_write_file(filp);
	return error;
}
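
/*
 * Convert the userspace xfs_fs_eofblocks request into the kernel-internal
 * xfs_eofblocks structure, validating the version, flags, padding, and any
 * requested uid/gid mappings.
 */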
static inline int
xfs_fs_eofblocks_from_user(
	struct xfs_fs_eofblocks *src,
	struct xfs_eofblocks *dst)
{
	if (src->eof_version != XFS_EOFBLOCKS_VERSION)
		return -EINVAL;

	if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
		return -EINVAL;

	if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
	    memchr_inv(src->pad64, 0, sizeof(src->pad64)))
		return -EINVAL;

	dst->eof_flags = src->eof_flags;
	dst->eof_prid = src->eof_prid;
	dst->eof_min_file_size = src->eof_min_file_size;

	dst->eof_uid = INVALID_UID;
	if (src->eof_flags & XFS_EOF_FLAGS_UID) {
		dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid);
		if (!uid_valid(dst->eof_uid))
			return -EINVAL;
	}

	dst->eof_gid = INVALID_GID;
	if (src->eof_flags & XFS_EOF_FLAGS_GID) {
		dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid);
		if (!gid_valid(dst->eof_gid))
			return -EINVAL;
	}
	return 0;
}

/*
 * Note: some of the ioctls return positive numbers as a
 * byte count indicating success, such as readlink_by_handle.
 * So we don't "sign flip" like most other routines.  This means
 * true errors need to be returned as a negative value.
 */
long
xfs_file_ioctl(
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		p)
{
	struct inode		*inode = file_inode(filp);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	void			__user *arg = (void __user *)p;
	int			error;

	trace_xfs_file_ioctl(ip);

	switch (cmd) {
	case FITRIM:
		return xfs_ioc_trim(mp, arg);
	case FS_IOC_GETFSLABEL:
		return xfs_ioc_getlabel(mp, arg);
	case FS_IOC_SETFSLABEL:
		return xfs_ioc_setlabel(filp, mp, arg);
	case XFS_IOC_ALLOCSP:
	case XFS_IOC_FREESP:
	case XFS_IOC_ALLOCSP64:
	case XFS_IOC_FREESP64: {
		xfs_flock64_t		bf;

		if (copy_from_user(&bf, arg, sizeof(bf)))
			return -EFAULT;
		return xfs_ioc_space(filp, &bf);
	}
	case XFS_IOC_DIOINFO: {
		struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
		struct dioattr		da;

		da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
		da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);

		if (copy_to_user(arg, &da, sizeof(da)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSBULKSTAT_SINGLE:
	case XFS_IOC_FSBULKSTAT:
	case XFS_IOC_FSINUMBERS:
		return xfs_ioc_fsbulkstat(mp, cmd, arg);

	case XFS_IOC_BULKSTAT:
		return xfs_ioc_bulkstat(mp, cmd, arg);
	case XFS_IOC_INUMBERS:
		return xfs_ioc_inumbers(mp, cmd, arg);

	case XFS_IOC_FSGEOMETRY_V1:
		return xfs_ioc_fsgeometry(mp, arg, 3);
	case XFS_IOC_FSGEOMETRY_V4:
		return xfs_ioc_fsgeometry(mp, arg, 4);
	case XFS_IOC_FSGEOMETRY:
		return xfs_ioc_fsgeometry(mp, arg, 5);

	case XFS_IOC_AG_GEOMETRY:
		return xfs_ioc_ag_geometry(mp, arg);

	case XFS_IOC_GETVERSION:
		return put_user(inode->i_generation, (int __user *)arg);

	case XFS_IOC_FSGETXATTR:
		return xfs_ioc_fsgetxattr(ip, 0, arg);
	case XFS_IOC_FSGETXATTRA:
		return xfs_ioc_fsgetxattr(ip, 1, arg);
	case XFS_IOC_FSSETXATTR:
		return xfs_ioc_fssetxattr(ip, filp, arg);
	case XFS_IOC_GETXFLAGS:
		return xfs_ioc_getxflags(ip, arg);
	case XFS_IOC_SETXFLAGS:
		return xfs_ioc_setxflags(ip, filp, arg);

	case XFS_IOC_GETBMAP:
	case XFS_IOC_GETBMAPA:
	case XFS_IOC_GETBMAPX:
		return xfs_ioc_getbmap(filp, cmd, arg);

	case FS_IOC_GETFSMAP:
		return xfs_ioc_getfsmap(ip, arg);

	case XFS_IOC_SCRUB_METADATA:
		return xfs_ioc_scrub_metadata(ip, arg);

	case XFS_IOC_FD_TO_HANDLE:
	case XFS_IOC_PATH_TO_HANDLE:
	case XFS_IOC_PATH_TO_FSHANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(hreq)))
			return -EFAULT;
		return xfs_find_handle(cmd, &hreq);
	}
	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_open_by_handle(filp, &hreq);
	}

	case XFS_IOC_READLINK_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -EFAULT;
		return xfs_readlink_by_handle(filp, &hreq);
	}
	case XFS_IOC_ATTRLIST_BY_HANDLE:
		return xfs_attrlist_by_handle(filp, arg);

	case XFS_IOC_ATTRMULTI_BY_HANDLE:
		return xfs_attrmulti_by_handle(filp, arg);

	case XFS_IOC_SWAPEXT: {
		struct xfs_swapext	sxp;

		if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
			return -EFAULT;
		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_ioc_swapext(&sxp);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSCOUNTS: {
		xfs_fsop_counts_t	out;

		xfs_fs_counts(mp, &out);
		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_SET_RESBLKS: {
		xfs_fsop_resblks_t	inout;
		uint64_t		in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&inout, arg, sizeof(inout)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;

		/* input parameter is passed in resblks field of structure */
		in = inout.resblks;
		error = xfs_reserve_blocks(mp, &in, &inout);
		mnt_drop_write_file(filp);
		if (error)
			return error;

		if (copy_to_user(arg, &inout, sizeof(inout)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_GET_RESBLKS: {
		xfs_fsop_resblks_t	out;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		error = xfs_reserve_blocks(mp, NULL, &out);
		if (error)
			return error;

		if (copy_to_user(arg, &out, sizeof(out)))
			return -EFAULT;
		return 0;
	}

	case XFS_IOC_FSGROWFSDATA: {
		xfs_growfs_data_t	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_data(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSLOG: {
		xfs_growfs_log_t	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_log(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_FSGROWFSRT: {
		xfs_growfs_rt_t	in;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		error = mnt_want_write_file(filp);
		if (error)
			return error;
		error = xfs_growfs_rt(mp, &in);
		mnt_drop_write_file(filp);
		return error;
	}

	case XFS_IOC_GOINGDOWN: {
		uint32_t	in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (get_user(in, (uint32_t __user *)arg))
			return -EFAULT;

		return xfs_fs_goingdown(mp, in);
	}

	case XFS_IOC_ERROR_INJECTION: {
		xfs_error_injection_t	in;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (copy_from_user(&in, arg, sizeof(in)))
			return -EFAULT;

		return xfs_errortag_add(mp, in.errtag);
	}

	case XFS_IOC_ERROR_CLEARALL:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return xfs_errortag_clearall(mp);

	case XFS_IOC_FREE_EOFBLOCKS: {
		struct xfs_fs_eofblocks	eofb;
		struct xfs_eofblocks	keofb;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (mp->m_flags & XFS_MOUNT_RDONLY)
			return -EROFS;

		if (copy_from_user(&eofb, arg, sizeof(eofb)))
			return -EFAULT;

		error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
		if (error)
			return error;

		sb_start_write(mp->m_super);
		error = xfs_icache_free_eofblocks(mp, &keofb);
		sb_end_write(mp->m_super);
		return error;
	}

	default:
		return -ENOTTY;
	}
}