xfs_attr_list.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_dir2.h"
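
/*
 * Comparison helper for sorting shortform entries: order primarily by hash
 * value, then by the original entry number so entries with equal hashes keep
 * their on-disk order.
 */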
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}
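
/*
 * True if the cursor has never been advanced, i.e. all of its fields are
 * still zero and the caller is starting a fresh listing.
 */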
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hash value and sort them before
 * we can begin returning them to the user.
 */
static int
xfs_attr_shortform_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_inode		*dp = context->dp;
	struct xfs_attr_sf_sort		*sbuf, *sbp;
	struct xfs_attr_shortform	*sf;
	struct xfs_attr_sf_entry	*sfe;
	int				sbsize, nsbuf, count, i;
	int				error = 0;

	ASSERT(dp->i_afp != NULL);
	sf = (struct xfs_attr_shortform *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			if (XFS_IS_CORRUPT(context->dp->i_mount,
					   !xfs_attr_namecheck(sfe->nameval,
							       sfe->namelen)))
				return -EFSCORRUPTED;
			context->put_listent(context,
					     sfe->flags,
					     sfe->nameval,
					     (int)sfe->namelen,
					     (int)sfe->valuelen);
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = xfs_attr_sf_nextentry(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe,
					     sizeof(*sfe));
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = xfs_attr_sf_nextentry(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf)
		goto out;

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		if (XFS_IS_CORRUPT(context->dp->i_mount,
				   !xfs_attr_namecheck(sbp->name,
						       sbp->namelen))) {
			error = -EFSCORRUPTED;
			goto out;
		}
		context->put_listent(context,
				     sbp->flags,
				     sbp->name,
				     sbp->namelen,
				     sbp->valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
out:
	kmem_free(sbuf);
	return error;
}
/*
 * We didn't find the block & hash mentioned in the cursor state, so
 * walk down the attr btree looking for the hash.
 */
STATIC int
xfs_attr_node_list_lookup(
	struct xfs_attr_list_context	*context,
	struct xfs_attrlist_cursor_kern	*cursor,
	struct xfs_buf			**pbp)
{
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_intnode		*node;
	struct xfs_da_node_entry	*btree;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	struct xfs_trans		*tp = context->tp;
	struct xfs_buf			*bp;
	int				i;
	int				error = 0;
	unsigned int			expected_level = 0;
	uint16_t			magic;

	ASSERT(*pbp == NULL);
	cursor->blkno = 0;
	for (;;) {
		error = xfs_da3_node_read(tp, dp, cursor->blkno, &bp,
				XFS_ATTR_FORK);
		if (error)
			return error;
		node = bp->b_addr;
		magic = be16_to_cpu(node->hdr.info.magic);
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC)
			break;
		if (magic != XFS_DA_NODE_MAGIC &&
		    magic != XFS_DA3_NODE_MAGIC) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					node, sizeof(*node));
			goto out_corruptbuf;
		}

		xfs_da3_node_hdr_from_disk(mp, &nodehdr, node);

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
			goto out_corruptbuf;

		/* Check the level from the root node. */
		if (cursor->blkno == 0)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level)
			goto out_corruptbuf;
		else
			expected_level--;

		btree = nodehdr.btree;
		for (i = 0; i < nodehdr.count; btree++, i++) {
			if (cursor->hashval <= be32_to_cpu(btree->hashval)) {
				cursor->blkno = be32_to_cpu(btree->before);
				trace_xfs_attr_list_node_descend(context,
						btree);
				break;
			}
		}
		xfs_trans_brelse(tp, bp);

		if (i == nodehdr.count)
			return 0;

		/* We can't point back to the root. */
		if (XFS_IS_CORRUPT(mp, cursor->blkno == 0))
			return -EFSCORRUPTED;
	}

	if (expected_level != 0)
		goto out_corruptbuf;

	*pbp = bp;
	return 0;

out_corruptbuf:
	xfs_buf_mark_corrupt(bp);
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
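
/*
 * Copy out attribute entries for attr_list() when the attr fork is in node
 * (multi-block btree) format: revalidate the cursor's leaf block or re-derive
 * it from the btree, then walk the leaf blocks forward, emitting entries.
 */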
STATIC int
xfs_attr_node_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_da_intnode		*node;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;
	int				error = 0;

	trace_xfs_attr_node_list(context);

	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(context->tp, dp, cursor->blkno, &bp,
				XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
						entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
						entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(context->tp, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(context->tp, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		error = xfs_attr_node_list_lookup(context, cursor, &bp);
		if (error || !bp)
			return error;
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		error = xfs_attr3_leaf_list_int(bp, context);
		if (error)
			break;
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(context->tp, bp);
		error = xfs_attr3_leaf_read(context->tp, dp, cursor->blkno,
					    &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(context->tp, bp);
	return error;
}
/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 */
int
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct xfs_attrlist_cursor_kern	*cursor = &context->cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return 0;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if ((entry->flags & XFS_ATTR_INCOMPLETE) &&
		    !context->allow_incomplete)
			continue;

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		if (XFS_IS_CORRUPT(context->dp->i_mount,
				   !xfs_attr_namecheck(name, namelen)))
			return -EFSCORRUPTED;
		context->put_listent(context, entry->flags,
				     name, namelen, valuelen);
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return 0;
}
/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_buf			*bp;
	int				error;

	trace_xfs_attr_leaf_list(context);

	context->cursor.blkno = 0;
	error = xfs_attr3_leaf_read(context->tp, context->dp, 0, &bp);
	if (error)
		return error;

	error = xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(context->tp, bp);
	return error;
}
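
/*
 * List extended attributes with the ILOCK already held, dispatching to the
 * shortform, single-leaf, or node-format walker based on the attr fork shape.
 */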
int
xfs_attr_list_ilocked(
	struct xfs_attr_list_context	*context)
{
	struct xfs_inode		*dp = context->dp;

	ASSERT(xfs_isilocked(dp, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (!xfs_inode_hasattr(dp))
		return 0;
	if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_list(context);
	if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
		return xfs_attr_leaf_list(context);
	return xfs_attr_node_list(context);
}
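
/*
 * Top-level entry point for listing extended attributes: take the attr-fork
 * ILOCK shared, walk the attributes via xfs_attr_list_ilocked(), and drop the
 * lock again.  Callers receive entries through context->put_listent and can
 * resume an interrupted listing through the cursor embedded in the context.
 */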
int
xfs_attr_list(
	struct xfs_attr_list_context	*context)
{
	struct xfs_inode		*dp = context->dp;
	uint				lock_mode;
	int				error;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	lock_mode = xfs_ilock_attr_map_shared(dp);
	error = xfs_attr_list_ilocked(context);
	xfs_iunlock(dp, lock_mode);
	return error;
}