tree-checker.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Qu Wenruo 2017.  All rights reserved.
 */

/*
 * The module is used to catch unexpected/corrupted tree block data.
 * Such behavior can be caused either by a fuzzed image or bugs.
 *
 * The objective is to do leaf/node validation checks when tree block is read
 * from disk, and check *every* possible member, so other code won't
 * need to check them again.
 *
 * Due to the potential and unwanted damage, every checker needs to be
 * carefully reviewed; otherwise it may prevent the mount of valid images.
 */
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/error-injection.h>
#include "ctree.h"
#include "tree-checker.h"
#include "disk-io.h"
#include "compression.h"
#include "volumes.h"
#include "misc.h"

/*
 * Error messages should follow this format:
 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
 *
 * @type:	leaf or node
 * @identifier:	the necessary info to locate the leaf/node.
 *		It's recommended to decode key.objectid/offset if it's
 *		meaningful.
 * @reason:	describe the error
 * @bad_value:	optional, it's recommended to output the bad value and its
 *		expected value (range).
 *
 * Since comma is used to separate the components, only spaces are allowed
 * inside each component.
 */
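
/*
 * For illustration only, a message produced by generic_err() below would
 * look like (all values are made up):
 *
 *   corrupt leaf: root=5 block=29360128 slot=10, invalid key objectid
 *
 * "leaf" is the <type>, "root=5 block=29360128 slot=10" the <identifier>,
 * and the rest the <reason>.
 */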

/*
 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
 * Allows callers to customize the output.
 */
__printf(3, 4)
__cold
static void generic_err(const struct extent_buffer *eb, int slot,
			const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
	va_end(args);
}

/*
 * Customized reporter for extent data item, since its key objectid and
 * offset have their own meaning.
 */
__printf(3, 4)
__cold
static void file_extent_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

/*
 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment.
 * Else return 1.
 */
#define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment)		      \
({									      \
	if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
		file_extent_err((leaf), (slot),				      \
	"invalid %s for file extent, have %llu, should be aligned to %u",    \
			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
			(alignment));					      \
	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));  \
})
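
/*
 * Illustrative use of the macro above (numbers made up): with a 4096 byte
 * alignment, CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, 4096) evaluates
 * to 1 and logs
 *   "invalid ram_bytes for file extent, have 4097, should be aligned to 4096"
 * when ram_bytes is 4097, and evaluates to 0 when ram_bytes is a multiple
 * of 4096.  The statement expression lets callers chain several of these
 * with || and bail out on the first failure.
 */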

static u64 file_extent_end(struct extent_buffer *leaf,
			   struct btrfs_key *key,
			   struct btrfs_file_extent_item *extent)
{
	u64 end;
	u64 len;

	if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_ram_bytes(leaf, extent);
		end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
	} else {
		len = btrfs_file_extent_num_bytes(leaf, extent);
		end = key->offset + len;
	}
	return end;
}
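
/*
 * Worked example for the helper above (numbers illustrative): with a 4KiB
 * sectorsize, an inline extent at key->offset 0 holding 100 bytes of
 * ram_bytes ends at ALIGN(0 + 100, 4096) == 4096, while a regular extent
 * at file offset 8192 with num_bytes 4096 ends at 12288.
 */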

/*
 * Customized report for dir_item, the only extra important information
 * is key->objectid, which represents the inode number.
 */
__printf(3, 4)
__cold
static void dir_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
		"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

/*
 * This function checks prev_key->objectid, to ensure current key and prev_key
 * share the same objectid as inode number.
 *
 * This is to detect missing INODE_ITEM in subvolume trees.
 *
 * Return true if everything is OK or we don't need to check.
 * Return false if anything is wrong.
 */
static bool check_prev_ino(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	/* No prev key, skip check */
	if (slot == 0)
		return true;

	/* Only these key->types need to be checked */
	ASSERT(key->type == BTRFS_XATTR_ITEM_KEY ||
	       key->type == BTRFS_INODE_REF_KEY ||
	       key->type == BTRFS_DIR_INDEX_KEY ||
	       key->type == BTRFS_DIR_ITEM_KEY ||
	       key->type == BTRFS_EXTENT_DATA_KEY);

	/*
	 * Only subvolume trees along with their reloc trees need this check.
	 * Trees like the log tree don't follow this ino requirement.
	 */
	if (!is_fstree(btrfs_header_owner(leaf)))
		return true;

	if (key->objectid == prev_key->objectid)
		return true;

	/* Error found */
	dir_item_err(leaf, slot,
		"invalid previous key objectid, have %llu expect %llu",
		prev_key->objectid, key->objectid);
	return false;
}
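
/*
 * Illustration (keys made up): in a subvolume leaf containing
 *   (257 EXTENT_DATA 4096) followed by (259 EXTENT_DATA 0),
 * the first key of inode 259 should have been (259 INODE_ITEM 0), which
 * would sort between the two.  The objectid mismatch between adjacent
 * slots therefore indicates a missing INODE_ITEM, and check_prev_ino()
 * returns false.
 */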

static int check_extent_data_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot,
				  struct btrfs_key *prev_key)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_file_extent_item *fi;
	u32 sectorsize = fs_info->sectorsize;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 extent_end;

	if (!IS_ALIGNED(key->offset, sectorsize)) {
		file_extent_err(leaf, slot,
"unaligned file_offset for file extent, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}

	/*
	 * Previous key must have the same key->objectid (ino).
	 * It can be XATTR_ITEM, INODE_ITEM or just another EXTENT_DATA.
	 * But if objectids mismatch, it means we have a missing
	 * INODE_ITEM.
	 */
	if (!check_prev_ino(leaf, key, slot, prev_key))
		return -EUCLEAN;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	/*
	 * Make sure the item contains at least inline header, so the file
	 * extent type is not some garbage.
	 */
	if (item_size < BTRFS_FILE_EXTENT_INLINE_DATA_START) {
		file_extent_err(leaf, slot,
				"invalid item size, have %u expect [%zu, %u)",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START,
				SZ_4K);
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) >= BTRFS_NR_FILE_EXTENT_TYPES) {
		file_extent_err(leaf, slot,
		"invalid type for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_type(leaf, fi),
			BTRFS_NR_FILE_EXTENT_TYPES - 1);
		return -EUCLEAN;
	}

	/*
	 * Support for new compression/encryption must introduce incompat flag,
	 * and must be caught in open_ctree().
	 */
	if (btrfs_file_extent_compression(leaf, fi) >= BTRFS_NR_COMPRESS_TYPES) {
		file_extent_err(leaf, slot,
	"invalid compression for file extent, have %u expect range [0, %u]",
			btrfs_file_extent_compression(leaf, fi),
			BTRFS_NR_COMPRESS_TYPES - 1);
		return -EUCLEAN;
	}
	if (btrfs_file_extent_encryption(leaf, fi)) {
		file_extent_err(leaf, slot,
			"invalid encryption for file extent, have %u expect 0",
			btrfs_file_extent_encryption(leaf, fi));
		return -EUCLEAN;
	}
	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
		/* Inline extent must have 0 as key offset */
		if (key->offset) {
			file_extent_err(leaf, slot,
		"invalid file_offset for inline file extent, have %llu expect 0",
				key->offset);
			return -EUCLEAN;
		}

		/* Compressed inline extent has no on-disk size, skip it */
		if (btrfs_file_extent_compression(leaf, fi) !=
		    BTRFS_COMPRESS_NONE)
			return 0;

		/* Uncompressed inline extent size must match item size */
		if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
		    btrfs_file_extent_ram_bytes(leaf, fi)) {
			file_extent_err(leaf, slot,
	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
				btrfs_file_extent_ram_bytes(leaf, fi));
			return -EUCLEAN;
		}
		return 0;
	}

	/* Regular or preallocated extent has fixed item size */
	if (item_size != sizeof(*fi)) {
		file_extent_err(leaf, slot,
	"invalid item size for reg/prealloc file extent, have %u expect %zu",
			item_size, sizeof(*fi));
		return -EUCLEAN;
	}
	if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
	    CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
		return -EUCLEAN;

	/* Catch extent end overflow */
	if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
			       key->offset, &extent_end)) {
		file_extent_err(leaf, slot,
	"extent end overflow, have file offset %llu extent num bytes %llu",
				key->offset,
				btrfs_file_extent_num_bytes(leaf, fi));
		return -EUCLEAN;
	}

	/*
	 * Check that no two consecutive file extent items, in the same leaf,
	 * present ranges that overlap each other.
	 */
	if (slot > 0 &&
	    prev_key->objectid == key->objectid &&
	    prev_key->type == BTRFS_EXTENT_DATA_KEY) {
		struct btrfs_file_extent_item *prev_fi;
		u64 prev_end;

		prev_fi = btrfs_item_ptr(leaf, slot - 1,
					 struct btrfs_file_extent_item);
		prev_end = file_extent_end(leaf, prev_key, prev_fi);
		if (prev_end > key->offset) {
			file_extent_err(leaf, slot - 1,
"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
					prev_end, key->offset);
			return -EUCLEAN;
		}
	}

	return 0;
}
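
/*
 * Overlap illustration (offsets made up): a regular extent at
 * (258 EXTENT_DATA 0) with num_bytes 8192 ends at 8192; if the next item
 * is (258 EXTENT_DATA 4096), then prev_end (8192) > key->offset (4096),
 * and the previous slot is reported as corrupted.
 */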

static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot, struct btrfs_key *prev_key)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u32 sectorsize = fs_info->sectorsize;
	u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);

	if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
		generic_err(leaf, slot,
		"invalid key objectid for csum item, have %llu expect %llu",
			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->offset, sectorsize)) {
		generic_err(leaf, slot,
	"unaligned key offset for csum item, have %llu should be aligned to %u",
			key->offset, sectorsize);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
		generic_err(leaf, slot,
	"unaligned item size for csum item, have %u should be aligned to %u",
			btrfs_item_size_nr(leaf, slot), csumsize);
		return -EUCLEAN;
	}
	if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
		u64 prev_csum_end;
		u32 prev_item_size;

		prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
		prev_csum_end = (prev_item_size / csumsize) * sectorsize;
		prev_csum_end += prev_key->offset;
		if (prev_csum_end > key->offset) {
			generic_err(leaf, slot - 1,
"csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
				prev_csum_end, key->offset);
			return -EUCLEAN;
		}
	}
	return 0;
}
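
/*
 * Coverage arithmetic above, with illustrative numbers: for crc32c
 * (csumsize 4) and a 4KiB sectorsize, a 64 byte csum item holds 16 sums
 * and covers 16 * 4096 = 65536 bytes.  A previous item at offset 1048576
 * must therefore end at or before 1114112, the start of the next csum item.
 */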

/* Inode item error output has the same format as dir_item_err() */
#define inode_item_err(eb, slot, fmt, ...)			\
	dir_item_err(eb, slot, fmt, __VA_ARGS__)

static int check_inode_key(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_key item_key;
	bool is_inode_item;

	btrfs_item_key_to_cpu(leaf, &item_key, slot);
	is_inode_item = (item_key.type == BTRFS_INODE_ITEM_KEY);

	/* For XATTR_ITEM, location key should be all 0 */
	if (item_key.type == BTRFS_XATTR_ITEM_KEY) {
		if (key->type != 0 || key->objectid != 0 || key->offset != 0)
			return -EUCLEAN;
		return 0;
	}

	if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
	    key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
	    key->objectid != BTRFS_FREE_INO_OBJECTID) {
		if (is_inode_item) {
			generic_err(leaf, slot,
	"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
				key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
				BTRFS_FIRST_FREE_OBJECTID,
				BTRFS_LAST_FREE_OBJECTID,
				BTRFS_FREE_INO_OBJECTID);
		} else {
			dir_item_err(leaf, slot,
"invalid location key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
				key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
				BTRFS_FIRST_FREE_OBJECTID,
				BTRFS_LAST_FREE_OBJECTID,
				BTRFS_FREE_INO_OBJECTID);
		}
		return -EUCLEAN;
	}
	if (key->offset != 0) {
		if (is_inode_item)
			inode_item_err(leaf, slot,
				       "invalid key offset: has %llu expect 0",
				       key->offset);
		else
			dir_item_err(leaf, slot,
				"invalid location key offset: has %llu expect 0",
				key->offset);
		return -EUCLEAN;
	}
	return 0;
}

static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key,
			  int slot)
{
	struct btrfs_key item_key;
	bool is_root_item;

	btrfs_item_key_to_cpu(leaf, &item_key, slot);
	is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY);

	/* No such tree id */
	if (key->objectid == 0) {
		if (is_root_item)
			generic_err(leaf, slot, "invalid root id 0");
		else
			dir_item_err(leaf, slot,
				     "invalid location key root id 0");
		return -EUCLEAN;
	}

	/* DIR_ITEM/INDEX/INODE_REF is not allowed to point to non-fs trees */
	if (!is_fstree(key->objectid) && !is_root_item) {
		dir_item_err(leaf, slot,
		"invalid location key objectid, have %llu expect [%llu, %llu]",
				key->objectid, BTRFS_FIRST_FREE_OBJECTID,
				BTRFS_LAST_FREE_OBJECTID);
		return -EUCLEAN;
	}

	/*
	 * ROOT_ITEM with non-zero offset means this is a snapshot, created at
	 * @offset transid.
	 * Furthermore, for location key in DIR_ITEM, its offset is always -1.
	 *
	 * So here we only check offset for reloc tree whose key->offset must
	 * be a valid tree.
	 */
	if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
		generic_err(leaf, slot, "invalid root id 0 for reloc tree");
		return -EUCLEAN;
	}
	return 0;
}

static int check_dir_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, struct btrfs_key *prev_key,
			  int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_dir_item *di;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u32 cur = 0;

	if (!check_prev_ino(leaf, key, slot, prev_key))
		return -EUCLEAN;
	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
	while (cur < item_size) {
		struct btrfs_key location_key;
		u32 name_len;
		u32 data_len;
		u32 max_name_len;
		u32 total_size;
		u32 name_hash;
		u8 dir_type;
		int ret;

		/* header itself should not cross item boundary */
		if (cur + sizeof(*di) > item_size) {
			dir_item_err(leaf, slot,
		"dir item header crosses item boundary, have %zu boundary %u",
				cur + sizeof(*di), item_size);
			return -EUCLEAN;
		}

		/* Location key check */
		btrfs_dir_item_key_to_cpu(leaf, di, &location_key);
		if (location_key.type == BTRFS_ROOT_ITEM_KEY) {
			ret = check_root_key(leaf, &location_key, slot);
			if (ret < 0)
				return ret;
		} else if (location_key.type == BTRFS_INODE_ITEM_KEY ||
			   location_key.type == 0) {
			ret = check_inode_key(leaf, &location_key, slot);
			if (ret < 0)
				return ret;
		} else {
			dir_item_err(leaf, slot,
			"invalid location key type, have %u, expect %u or %u",
				location_key.type, BTRFS_ROOT_ITEM_KEY,
				BTRFS_INODE_ITEM_KEY);
			return -EUCLEAN;
		}

		/* dir type check */
		dir_type = btrfs_dir_type(leaf, di);
		if (dir_type >= BTRFS_FT_MAX) {
			dir_item_err(leaf, slot,
			"invalid dir item type, have %u expect [0, %u)",
				dir_type, BTRFS_FT_MAX);
			return -EUCLEAN;
		}

		if (key->type == BTRFS_XATTR_ITEM_KEY &&
		    dir_type != BTRFS_FT_XATTR) {
			dir_item_err(leaf, slot,
		"invalid dir item type for XATTR key, have %u expect %u",
				dir_type, BTRFS_FT_XATTR);
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR &&
		    key->type != BTRFS_XATTR_ITEM_KEY) {
			dir_item_err(leaf, slot,
				"xattr dir type found for non-XATTR key");
			return -EUCLEAN;
		}
		if (dir_type == BTRFS_FT_XATTR)
			max_name_len = XATTR_NAME_MAX;
		else
			max_name_len = BTRFS_NAME_LEN;

		/* Name/data length check */
		name_len = btrfs_dir_name_len(leaf, di);
		data_len = btrfs_dir_data_len(leaf, di);
		if (name_len > max_name_len) {
			dir_item_err(leaf, slot,
				"dir item name len too long, have %u max %u",
				name_len, max_name_len);
			return -EUCLEAN;
		}
		if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
			dir_item_err(leaf, slot,
			"dir item name and data len too long, have %u max %u",
				name_len + data_len,
				BTRFS_MAX_XATTR_SIZE(fs_info));
			return -EUCLEAN;
		}

		if (data_len && dir_type != BTRFS_FT_XATTR) {
			dir_item_err(leaf, slot,
			"dir item with invalid data len, have %u expect 0",
				data_len);
			return -EUCLEAN;
		}

		total_size = sizeof(*di) + name_len + data_len;

		/* header and name/data should not cross item boundary */
		if (cur + total_size > item_size) {
			dir_item_err(leaf, slot,
		"dir item data crosses item boundary, have %u boundary %u",
				cur + total_size, item_size);
			return -EUCLEAN;
		}

		/*
		 * Special check for XATTR/DIR_ITEM, as key->offset is name
		 * hash, should match its name
		 */
		if (key->type == BTRFS_DIR_ITEM_KEY ||
		    key->type == BTRFS_XATTR_ITEM_KEY) {
			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];

			read_extent_buffer(leaf, namebuf,
					(unsigned long)(di + 1), name_len);
			name_hash = btrfs_name_hash(namebuf, name_len);
			if (key->offset != name_hash) {
				dir_item_err(leaf, slot,
		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
					name_hash, key->offset);
				return -EUCLEAN;
			}
		}
		cur += total_size;
		di = (struct btrfs_dir_item *)((void *)di + total_size);
	}
	return 0;
}
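
/*
 * On-disk layout the loop above walks, sketched for reference:
 *
 *   | btrfs_dir_item | name bytes | xattr data bytes | btrfs_dir_item | ...
 *
 * Each header records name_len/data_len, so total_size advances the cursor
 * to the next header.  For DIR_ITEM/XATTR_ITEM keys, key->offset is the
 * crc32c-based btrfs_name_hash() of the name, which is what the mismatch
 * check at the end of the loop verifies.
 */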

__printf(3, 4)
__cold
static void block_group_err(const struct extent_buffer *eb, int slot,
			    const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, key.offset, &vaf);
	va_end(args);
}

static int check_block_group_item(struct extent_buffer *leaf,
				  struct btrfs_key *key, int slot)
{
	struct btrfs_block_group_item bgi;
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 type;

	/*
	 * Here we don't really care about alignment since extent allocator can
	 * handle it.  We care more about the size.
	 */
	if (key->offset == 0) {
		block_group_err(leaf, slot,
				"invalid block group size 0");
		return -EUCLEAN;
	}

	if (item_size != sizeof(bgi)) {
		block_group_err(leaf, slot,
			"invalid item size, have %u expect %zu",
				item_size, sizeof(bgi));
		return -EUCLEAN;
	}

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	if (btrfs_stack_block_group_chunk_objectid(&bgi) !=
	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
		block_group_err(leaf, slot,
		"invalid block group chunk objectid, have %llu expect %llu",
				btrfs_stack_block_group_chunk_objectid(&bgi),
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		return -EUCLEAN;
	}

	if (btrfs_stack_block_group_used(&bgi) > key->offset) {
		block_group_err(leaf, slot,
			"invalid block group used, have %llu expect [0, %llu)",
				btrfs_stack_block_group_used(&bgi), key->offset);
		return -EUCLEAN;
	}

	flags = btrfs_stack_block_group_flags(&bgi);
	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
		block_group_err(leaf, slot,
"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
		return -EUCLEAN;
	}

	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	if (type != BTRFS_BLOCK_GROUP_DATA &&
	    type != BTRFS_BLOCK_GROUP_METADATA &&
	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
	    type != (BTRFS_BLOCK_GROUP_METADATA |
		     BTRFS_BLOCK_GROUP_DATA)) {
		block_group_err(leaf, slot,
"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
			type, hweight64(type),
			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
			BTRFS_BLOCK_GROUP_SYSTEM,
			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
		return -EUCLEAN;
	}
	return 0;
}
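
/*
 * Profile flag check illustrated (bit values recalled from the on-disk
 * format, verify against ctree.h): flags 0x11 (DATA | RAID1) has one
 * profile bit set and passes, while 0x31 (DATA | RAID1 | DUP) has
 * hweight64(0x30) == 2 profile bits and is rejected as corruption.
 */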

__printf(4, 5)
__cold
static void chunk_err(const struct extent_buffer *leaf,
		      const struct btrfs_chunk *chunk, u64 logical,
		      const char *fmt, ...)
{
	const struct btrfs_fs_info *fs_info = leaf->fs_info;
	bool is_sb;
	struct va_format vaf;
	va_list args;
	int i;
	int slot = -1;

	/* Only superblock eb is able to have such small offset */
	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);

	if (!is_sb) {
		/*
		 * Get the slot number by iterating through all slots, this
		 * would provide better readability.
		 */
		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
			if (btrfs_item_ptr_offset(leaf, i) ==
			    (unsigned long)chunk) {
				slot = i;
				break;
			}
		}
	}
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (is_sb)
		btrfs_crit(fs_info,
		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
			   logical, &vaf);
	else
		btrfs_crit(fs_info,
	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
			   logical, &vaf);
	va_end(args);
}

/*
 * The common chunk check which could also work on super block sys chunk array.
 *
 * Return -EUCLEAN if anything is corrupted.
 * Return 0 if everything is OK.
 */
int btrfs_check_chunk_valid(struct extent_buffer *leaf,
			    struct btrfs_chunk *chunk, u64 logical)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	u64 length;
	u64 chunk_end;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;
	int raid_index;
	int nparity;
	int ncopies;

	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	raid_index = btrfs_bg_flags_to_raid_index(type);
	ncopies = btrfs_raid_array[raid_index].ncopies;
	nparity = btrfs_raid_array[raid_index].nparity;

	if (!num_stripes) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes, have %u", num_stripes);
		return -EUCLEAN;
	}
	if (num_stripes < ncopies) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes < ncopies, have %u < %d",
			  num_stripes, ncopies);
		return -EUCLEAN;
	}
	if (nparity && num_stripes == nparity) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk num_stripes == nparity, have %u == %d",
			  num_stripes, nparity);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk logical, have %llu should be aligned to %u",
			  logical, fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk sectorsize, have %u expect %u",
			  btrfs_chunk_sector_size(leaf, chunk),
			  fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk length, have %llu", length);
		return -EUCLEAN;
	}
	if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
		chunk_err(leaf, chunk, logical,
"invalid chunk logical start and length, have logical start %llu length %llu",
			  logical, length);
		return -EUCLEAN;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		chunk_err(leaf, chunk, logical,
			  "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EUCLEAN;
	}
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		chunk_err(leaf, chunk, logical,
			  "unrecognized chunk type: 0x%llx",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EUCLEAN;
	}

	if (!has_single_bit_set(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
	    (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
		chunk_err(leaf, chunk, logical,
		"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
			  type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}
	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
		chunk_err(leaf, chunk, logical,
	"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
			  type, BTRFS_BLOCK_GROUP_TYPE_MASK);
		return -EUCLEAN;
	}

	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
		chunk_err(leaf, chunk, logical,
			  "system chunk with data or metadata type: 0x%llx",
			  type);
		return -EUCLEAN;
	}

	features = btrfs_super_incompat_flags(fs_info->super_copy);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = true;

	if (!mixed) {
		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
		    (type & BTRFS_BLOCK_GROUP_DATA)) {
			chunk_err(leaf, chunk, logical,
			"mixed chunk type in non-mixed mode: 0x%llx", type);
			return -EUCLEAN;
		}
	}

	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
		chunk_err(leaf, chunk, logical,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EUCLEAN;
	}
	return 0;
}
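
/*
 * The per-profile stripe constraints enforced above, summarized:
 *
 *   RAID10: sub_stripes == 2     RAID1: num_stripes == 2
 *   RAID5:  num_stripes >= 2     RAID6: num_stripes >= 3
 *   DUP:    num_stripes == 2     single (no profile bit): num_stripes == 1
 *
 * E.g. a chunk claiming RAID6 with num_stripes 2 is rejected, since RAID6
 * needs at least one data stripe besides its two parity stripes.
 */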

/*
 * Enhanced version of chunk item checker.
 *
 * The common btrfs_check_chunk_valid() doesn't check item size since it needs
 * to work on super block sys_chunk_array which doesn't have full item ptr.
 */
static int check_leaf_chunk_item(struct extent_buffer *leaf,
				 struct btrfs_chunk *chunk,
				 struct btrfs_key *key, int slot)
{
	int num_stripes;

	if (btrfs_item_size_nr(leaf, slot) < sizeof(struct btrfs_chunk)) {
		chunk_err(leaf, chunk, key->offset,
			"invalid chunk item size: have %u expect [%zu, %u)",
			btrfs_item_size_nr(leaf, slot),
			sizeof(struct btrfs_chunk),
			BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
		return -EUCLEAN;
	}

	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* Let btrfs_check_chunk_valid() handle this error type */
	if (num_stripes == 0)
		goto out;

	if (btrfs_chunk_item_size(num_stripes) !=
	    btrfs_item_size_nr(leaf, slot)) {
		chunk_err(leaf, chunk, key->offset,
			"invalid chunk item size: have %u expect %lu",
			btrfs_item_size_nr(leaf, slot),
			btrfs_chunk_item_size(num_stripes));
		return -EUCLEAN;
	}
out:
	return btrfs_check_chunk_valid(leaf, chunk, key->offset);
}
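
/*
 * Size arithmetic behind the check above: struct btrfs_chunk embeds its
 * first stripe, so (assuming the usual helper definition)
 * btrfs_chunk_item_size(n) == sizeof(struct btrfs_chunk) +
 * (n - 1) * sizeof(struct btrfs_stripe), and the item size must match it
 * exactly for n stripes.
 */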

__printf(3, 4)
__cold
static void dev_item_err(const struct extent_buffer *eb, int slot,
			 const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;

	btrfs_item_key_to_cpu(eb, &key, slot);
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
		key.objectid, &vaf);
	va_end(args);
}

static int check_dev_item(struct extent_buffer *leaf,
			  struct btrfs_key *key, int slot)
{
	struct btrfs_dev_item *ditem;
	const u32 item_size = btrfs_item_size_nr(leaf, slot);

	if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
		dev_item_err(leaf, slot,
			     "invalid objectid: has=%llu expect=%llu",
			     key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
		return -EUCLEAN;
	}

	if (unlikely(item_size != sizeof(*ditem))) {
		dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
			     item_size, sizeof(*ditem));
		return -EUCLEAN;
	}

	ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
	if (btrfs_device_id(leaf, ditem) != key->offset) {
		dev_item_err(leaf, slot,
			     "devid mismatch: key has=%llu item has=%llu",
			     key->offset, btrfs_device_id(leaf, ditem));
		return -EUCLEAN;
	}

	/*
	 * For device total_bytes, we don't have reliable way to check it, as
	 * it can be 0 for device removal.  Device size check can only be done
	 * by dev extents check.
	 */
	if (btrfs_device_bytes_used(leaf, ditem) >
	    btrfs_device_total_bytes(leaf, ditem)) {
		dev_item_err(leaf, slot,
			     "invalid bytes used: have %llu expect [0, %llu]",
			     btrfs_device_bytes_used(leaf, ditem),
			     btrfs_device_total_bytes(leaf, ditem));
		return -EUCLEAN;
	}
	/*
	 * Remaining members like io_align/type/gen/dev_group aren't really
	 * utilized.  Skip them to make later usage of them easier.
	 */
	return 0;
}

static int check_inode_item(struct extent_buffer *leaf,
			    struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_inode_item *iitem;
	u64 super_gen = btrfs_super_generation(fs_info->super_copy);
	u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
	const u32 item_size = btrfs_item_size_nr(leaf, slot);
	u32 mode;
	int ret;

	ret = check_inode_key(leaf, key, slot);
	if (ret < 0)
		return ret;

	if (unlikely(item_size != sizeof(*iitem))) {
		generic_err(leaf, slot, "invalid item size: has %u expect %zu",
			    item_size, sizeof(*iitem));
		return -EUCLEAN;
	}

	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);

	/* Here we use super block generation + 1 to handle log tree */
	if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
		inode_item_err(leaf, slot,
			"invalid inode generation: has %llu expect (0, %llu]",
			       btrfs_inode_generation(leaf, iitem),
			       super_gen + 1);
		return -EUCLEAN;
	}
	/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
	if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
		inode_item_err(leaf, slot,
			"invalid inode transid: has %llu expect [0, %llu]",
			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
		return -EUCLEAN;
	}

	/*
	 * For size and nbytes it's better not to be too strict, as for dir
	 * item its size/nbytes can easily get wrong, but doesn't affect
	 * anything in the fs.  So here we skip the check.
	 */
	mode = btrfs_inode_mode(leaf, iitem);
	if (mode & ~valid_mask) {
		inode_item_err(leaf, slot,
			       "unknown mode bit detected: 0x%x",
			       mode & ~valid_mask);
		return -EUCLEAN;
	}

	/*
	 * S_IFMT is not bit mapped so we can't completely rely on
	 * is_power_of_2/has_single_bit_set, but it can save us from checking
	 * FIFO/CHR/DIR/REG.  Only needs to check BLK, LNK and SOCK.
	 */
	if (!has_single_bit_set(mode & S_IFMT)) {
		if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
			inode_item_err(leaf, slot,
			"invalid mode: has 0%o expect valid S_IF* bit(s)",
				       mode & S_IFMT);
			return -EUCLEAN;
		}
	}
	if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
		inode_item_err(leaf, slot,
		       "invalid nlink: has %u expect no more than 1 for dir",
			btrfs_inode_nlink(leaf, iitem));
		return -EUCLEAN;
	}
	if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
		inode_item_err(leaf, slot,
			       "unknown flags detected: 0x%llx",
			       btrfs_inode_flags(leaf, iitem) &
			       ~BTRFS_INODE_FLAG_MASK);
		return -EUCLEAN;
	}
	return 0;
}
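
/*
 * Mode check illustrated (S_IF* octal values per <linux/stat.h>):
 * S_IFREG (0100000) is a single bit and passes directly; S_IFLNK (0120000)
 * sets two bits, so it fails has_single_bit_set() but is accepted by the
 * explicit S_ISLNK() fallback; a bogus S_IFMT value such as 0030000 fails
 * both paths and is flagged as corruption.
 */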

static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
			   int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_root_item ri = { 0 };
	const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
				     BTRFS_ROOT_SUBVOL_DEAD;
	int ret;

	ret = check_root_key(leaf, key, slot);
	if (ret < 0)
		return ret;

	if (btrfs_item_size_nr(leaf, slot) != sizeof(ri) &&
	    btrfs_item_size_nr(leaf, slot) != btrfs_legacy_root_item_size()) {
		generic_err(leaf, slot,
			    "invalid root item size, have %u expect %zu or %u",
			    btrfs_item_size_nr(leaf, slot), sizeof(ri),
			    btrfs_legacy_root_item_size());
		return -EUCLEAN;
	}

	/*
	 * For legacy root item, the members starting at generation_v2 will be
	 * all filled with 0.
	 * And since we allow generation_v2 as 0, it will still pass the check.
	 */
	read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
			   btrfs_item_size_nr(leaf, slot));

	/* Generation related */
	if (btrfs_root_generation(&ri) >
	    btrfs_super_generation(fs_info->super_copy) + 1) {
		generic_err(leaf, slot,
			"invalid root generation, have %llu expect (0, %llu]",
			    btrfs_root_generation(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (btrfs_root_generation_v2(&ri) >
	    btrfs_super_generation(fs_info->super_copy) + 1) {
		generic_err(leaf, slot,
		"invalid root v2 generation, have %llu expect (0, %llu]",
			    btrfs_root_generation_v2(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (btrfs_root_last_snapshot(&ri) >
	    btrfs_super_generation(fs_info->super_copy) + 1) {
		generic_err(leaf, slot,
		"invalid root last_snapshot, have %llu expect (0, %llu]",
			    btrfs_root_last_snapshot(&ri),
			    btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}

	/* Alignment and level check */
	if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
		generic_err(leaf, slot,
		"invalid root bytenr, have %llu expect to be aligned to %u",
			    btrfs_root_bytenr(&ri), fs_info->sectorsize);
		return -EUCLEAN;
	}
	if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
		generic_err(leaf, slot,
			    "invalid root level, have %u expect [0, %u]",
			    btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (ri.drop_level >= BTRFS_MAX_LEVEL) {
		generic_err(leaf, slot,
			    "invalid root level, have %u expect [0, %u]",
			    ri.drop_level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/* Flags check */
	if (btrfs_root_flags(&ri) & ~valid_root_flags) {
		generic_err(leaf, slot,
			    "invalid root flags, have 0x%llx expect mask 0x%llx",
			    btrfs_root_flags(&ri), valid_root_flags);
		return -EUCLEAN;
	}
	return 0;
}

__printf(3, 4)
__cold
static void extent_err(const struct extent_buffer *eb, int slot,
		       const char *fmt, ...)
{
	struct btrfs_key key;
	struct va_format vaf;
	va_list args;
	u64 bytenr;
	u64 len;

	btrfs_item_key_to_cpu(eb, &key, slot);
	bytenr = key.objectid;
	if (key.type == BTRFS_METADATA_ITEM_KEY ||
	    key.type == BTRFS_TREE_BLOCK_REF_KEY ||
	    key.type == BTRFS_SHARED_BLOCK_REF_KEY)
		len = eb->fs_info->nodesize;
	else
		len = key.offset;
	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	btrfs_crit(eb->fs_info,
	"corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
		btrfs_header_level(eb) == 0 ? "leaf" : "node",
		eb->start, slot, bytenr, len, &vaf);
	va_end(args);
}

static int check_extent_item(struct extent_buffer *leaf,
			     struct btrfs_key *key, int slot)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	struct btrfs_extent_item *ei;
	bool is_tree_block = false;
	unsigned long ptr;	/* Current pointer inside inline refs */
	unsigned long end;	/* Extent item end */
	const u32 item_size = btrfs_item_size_nr(leaf, slot);
	u64 flags;
	u64 generation;
	u64 total_refs;		/* Total refs in btrfs_extent_item */
	u64 inline_refs = 0;	/* found total inline refs */

	if (key->type == BTRFS_METADATA_ITEM_KEY &&
	    !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		generic_err(leaf, slot,
"invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
		return -EUCLEAN;
	}
	/* key->objectid is the bytenr for both key types */
	if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
		generic_err(leaf, slot,
		"invalid key objectid, have %llu expect to be aligned to %u",
			   key->objectid, fs_info->sectorsize);
		return -EUCLEAN;
	}

	/* key->offset is tree level for METADATA_ITEM_KEY */
	if (key->type == BTRFS_METADATA_ITEM_KEY &&
	    key->offset >= BTRFS_MAX_LEVEL) {
		extent_err(leaf, slot,
			   "invalid tree level, have %llu expect [0, %u]",
			   key->offset, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}

	/*
	 * EXTENT/METADATA_ITEM consists of:
	 * 1) One btrfs_extent_item
	 *    Records the total refs, type and generation of the extent.
	 *
	 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
	 *    Records the first key and level of the tree block.
	 *
	 * 3) Zero or more btrfs_extent_inline_ref(s)
	 *    Each inline ref consists of one btrfs_extent_inline_ref showing:
	 *    3.1) The ref type, one of the 4
	 *         TREE_BLOCK_REF	Tree block only
	 *         SHARED_BLOCK_REF	Tree block only
	 *         EXTENT_DATA_REF	Data only
	 *         SHARED_DATA_REF	Data only
	 *    3.2) Ref type specific data
	 *         Either using btrfs_extent_inline_ref::offset, or specific
	 *         data structure.
	 */
	if (item_size < sizeof(*ei)) {
		extent_err(leaf, slot,
			   "invalid item size, have %u expect [%zu, %u)",
			   item_size, sizeof(*ei),
			   BTRFS_LEAF_DATA_SIZE(fs_info));
		return -EUCLEAN;
	}
	end = item_size + btrfs_item_ptr_offset(leaf, slot);

	/* Checks against extent_item */
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	total_refs = btrfs_extent_refs(leaf, ei);
	generation = btrfs_extent_generation(leaf, ei);
	if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
		extent_err(leaf, slot,
			   "invalid generation, have %llu expect (0, %llu]",
			   generation,
			   btrfs_super_generation(fs_info->super_copy) + 1);
		return -EUCLEAN;
	}
	if (!has_single_bit_set(flags & (BTRFS_EXTENT_FLAG_DATA |
					 BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
		extent_err(leaf, slot,
		"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
			   flags, BTRFS_EXTENT_FLAG_DATA |
			   BTRFS_EXTENT_FLAG_TREE_BLOCK);
		return -EUCLEAN;
	}
	is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
	if (is_tree_block) {
		if (key->type == BTRFS_EXTENT_ITEM_KEY &&
		    key->offset != fs_info->nodesize) {
			extent_err(leaf, slot,
				   "invalid extent length, have %llu expect %u",
				   key->offset, fs_info->nodesize);
			return -EUCLEAN;
		}
	} else {
		if (key->type != BTRFS_EXTENT_ITEM_KEY) {
			extent_err(leaf, slot,
			"invalid key type, have %u expect %u for data backref",
				   key->type, BTRFS_EXTENT_ITEM_KEY);
			return -EUCLEAN;
		}
		if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
			extent_err(leaf, slot,
			"invalid extent length, have %llu expect aligned to %u",
				   key->offset, fs_info->sectorsize);
			return -EUCLEAN;
		}
	}
	ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);

	/* Check the special case of btrfs_tree_block_info */
	if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
			extent_err(leaf, slot,
			"invalid tree block info level, have %u expect [0, %u]",
				   btrfs_tree_block_level(leaf, info),
				   BTRFS_MAX_LEVEL - 1);
			return -EUCLEAN;
		}
		ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
	}

	/* Check inline refs */
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		struct btrfs_extent_data_ref *dref;
		struct btrfs_shared_data_ref *sref;
		u64 dref_offset;
		u64 inline_offset;
		u8 inline_type;

		if (ptr + sizeof(*iref) > end) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
				   ptr, sizeof(*iref), end);
			return -EUCLEAN;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
		if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
			extent_err(leaf, slot,
"inline ref item overflows extent item, ptr %lu iref size %u end %lu",
				   ptr, inline_type, end);
			return -EUCLEAN;
		}

		switch (inline_type) {
		/* inline_offset is subvolid of the owner, no need to check */
		case BTRFS_TREE_BLOCK_REF_KEY:
			inline_refs++;
			break;
		/* Contains parent bytenr */
		case BTRFS_SHARED_BLOCK_REF_KEY:
			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid tree parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs++;
			break;
		/*
		 * Contains owner subvolid, owner key objectid, adjusted offset.
		 * The only obvious corruption can happen in that offset.
		 */
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
			if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid data ref offset, have %llu expect aligned to %u",
					   dref_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_extent_data_ref_count(leaf, dref);
			break;
		/* Contains parent bytenr and ref count */
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
				extent_err(leaf, slot,
		"invalid data parent bytenr, have %llu expect aligned to %u",
					   inline_offset, fs_info->sectorsize);
				return -EUCLEAN;
			}
			inline_refs += btrfs_shared_data_ref_count(leaf, sref);
			break;
		default:
			extent_err(leaf, slot, "unknown inline ref type: %u",
				   inline_type);
			return -EUCLEAN;
		}
		ptr += btrfs_extent_inline_ref_size(inline_type);
	}
	/* No padding is allowed */
	if (ptr != end) {
		extent_err(leaf, slot,
			   "invalid extent item size, padding bytes found");
		return -EUCLEAN;
	}

	/* Finally, check the inline refs against total refs */
	if (inline_refs > total_refs) {
		extent_err(leaf, slot,
			"invalid extent refs, have %llu expect >= inline %llu",
			   total_refs, inline_refs);
		return -EUCLEAN;
	}
	return 0;
}
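
/*
 * Layout walked above, sketched with an illustrative skinny metadata item
 * (sizes recalled, not taken from this file) for a level 1 tree block
 * referenced by two subvolumes:
 *
 *   key (12582912 METADATA_ITEM 1) itemsize 42
 *       refs 2 gen 10 flags TREE_BLOCK
 *       tree block backref root 257
 *       tree block backref root 258
 *
 * Two TREE_BLOCK_REF inline refs make inline_refs == 2 == total_refs, so
 * both the padding check and the final refs comparison pass.
 */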
  1279. static int check_simple_keyed_refs(struct extent_buffer *leaf,
  1280. struct btrfs_key *key, int slot)
  1281. {
  1282. u32 expect_item_size = 0;
  1283. if (key->type == BTRFS_SHARED_DATA_REF_KEY)
  1284. expect_item_size = sizeof(struct btrfs_shared_data_ref);
  1285. if (btrfs_item_size_nr(leaf, slot) != expect_item_size) {
  1286. generic_err(leaf, slot,
  1287. "invalid item size, have %u expect %u for key type %u",
  1288. btrfs_item_size_nr(leaf, slot),
  1289. expect_item_size, key->type);
  1290. return -EUCLEAN;
  1291. }
  1292. if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
  1293. generic_err(leaf, slot,
  1294. "invalid key objectid for shared block ref, have %llu expect aligned to %u",
  1295. key->objectid, leaf->fs_info->sectorsize);
  1296. return -EUCLEAN;
  1297. }
  1298. if (key->type != BTRFS_TREE_BLOCK_REF_KEY &&
  1299. !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) {
  1300. extent_err(leaf, slot,
  1301. "invalid tree parent bytenr, have %llu expect aligned to %u",
  1302. key->offset, leaf->fs_info->sectorsize);
  1303. return -EUCLEAN;
  1304. }
  1305. return 0;
  1306. }
static int check_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_key *key, int slot)
{
	struct btrfs_extent_data_ref *dref;
	unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
	const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);

	if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) {
		generic_err(leaf, slot,
	"invalid item size, have %u expect aligned to %zu for key type %u",
			    btrfs_item_size_nr(leaf, slot),
			    sizeof(*dref), key->type);
		return -EUCLEAN;
	}
	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
		generic_err(leaf, slot,
	"invalid key objectid for extent data ref, have %llu expect aligned to %u",
			    key->objectid, leaf->fs_info->sectorsize);
		return -EUCLEAN;
	}
	for (; ptr < end; ptr += sizeof(*dref)) {
		u64 offset;

		/*
		 * We cannot check the extent_data_ref hash due to possible
		 * overflow from the leaf caused by hash collisions.
		 */
		dref = (struct btrfs_extent_data_ref *)ptr;
		offset = btrfs_extent_data_ref_offset(leaf, dref);
		if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
			extent_err(leaf, slot,
	"invalid extent data backref offset, have %llu expect aligned to %u",
				   offset, leaf->fs_info->sectorsize);
			return -EUCLEAN;
		}
	}
	return 0;
}
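
/*
 * An INODE_REF item packs one or more (struct btrfs_inode_ref + name)
 * records back to back, one per hard link whose name hashes to this key;
 * a sketch of the layout walked by check_inode_ref() below:
 *
 *	| index | name_len | name ... | index | name_len | name ... |
 */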
#define inode_ref_err(eb, slot, fmt, args...)			\
	inode_item_err(eb, slot, fmt, ##args)
static int check_inode_ref(struct extent_buffer *leaf,
			   struct btrfs_key *key, struct btrfs_key *prev_key,
			   int slot)
{
	struct btrfs_inode_ref *iref;
	unsigned long ptr;
	unsigned long end;

	if (!check_prev_ino(leaf, key, slot, prev_key))
		return -EUCLEAN;
	/* namelen can't be 0, so item_size == sizeof() is also invalid */
	if (btrfs_item_size_nr(leaf, slot) <= sizeof(*iref)) {
		inode_ref_err(leaf, slot,
			"invalid item size, have %u expect (%zu, %u)",
			btrfs_item_size_nr(leaf, slot),
			sizeof(*iref), BTRFS_LEAF_DATA_SIZE(leaf->fs_info));
		return -EUCLEAN;
	}

	ptr = btrfs_item_ptr_offset(leaf, slot);
	end = ptr + btrfs_item_size_nr(leaf, slot);
	while (ptr < end) {
		u16 namelen;

		/* The struct itself (not just the pointer) must fit */
		if (ptr + sizeof(*iref) > end) {
			inode_ref_err(leaf, slot,
			"inode ref overflow, ptr %lu end %lu inode_ref_size %zu",
				ptr, end, sizeof(*iref));
			return -EUCLEAN;
		}

		iref = (struct btrfs_inode_ref *)ptr;
		namelen = btrfs_inode_ref_name_len(leaf, iref);
		if (ptr + sizeof(*iref) + namelen > end) {
			inode_ref_err(leaf, slot,
				"inode ref overflow, ptr %lu end %lu namelen %u",
				ptr, end, namelen);
			return -EUCLEAN;
		}

		/*
		 * NOTE: In theory we should record all found index numbers to
		 * catch duplicated indexes, but that would be too time
		 * consuming for inodes with many hard links.
		 */
		ptr += sizeof(*iref) + namelen;
	}
	return 0;
}

/*
 * Common point to switch the item-specific validation.
 */
static int check_leaf_item(struct extent_buffer *leaf,
			   struct btrfs_key *key, int slot,
			   struct btrfs_key *prev_key)
{
	int ret = 0;
	struct btrfs_chunk *chunk;

	switch (key->type) {
	case BTRFS_EXTENT_DATA_KEY:
		ret = check_extent_data_item(leaf, key, slot, prev_key);
		break;
	case BTRFS_EXTENT_CSUM_KEY:
		ret = check_csum_item(leaf, key, slot, prev_key);
		break;
	case BTRFS_DIR_ITEM_KEY:
	case BTRFS_DIR_INDEX_KEY:
	case BTRFS_XATTR_ITEM_KEY:
		ret = check_dir_item(leaf, key, prev_key, slot);
		break;
	case BTRFS_INODE_REF_KEY:
		ret = check_inode_ref(leaf, key, prev_key, slot);
		break;
	case BTRFS_BLOCK_GROUP_ITEM_KEY:
		ret = check_block_group_item(leaf, key, slot);
		break;
	case BTRFS_CHUNK_ITEM_KEY:
		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		ret = check_leaf_chunk_item(leaf, chunk, key, slot);
		break;
	case BTRFS_DEV_ITEM_KEY:
		ret = check_dev_item(leaf, key, slot);
		break;
	case BTRFS_INODE_ITEM_KEY:
		ret = check_inode_item(leaf, key, slot);
		break;
	case BTRFS_ROOT_ITEM_KEY:
		ret = check_root_item(leaf, key, slot);
		break;
	case BTRFS_EXTENT_ITEM_KEY:
	case BTRFS_METADATA_ITEM_KEY:
		ret = check_extent_item(leaf, key, slot);
		break;
	case BTRFS_TREE_BLOCK_REF_KEY:
	case BTRFS_SHARED_DATA_REF_KEY:
	case BTRFS_SHARED_BLOCK_REF_KEY:
		ret = check_simple_keyed_refs(leaf, key, slot);
		break;
	case BTRFS_EXTENT_DATA_REF_KEY:
		ret = check_extent_data_ref(leaf, key, slot);
		break;
	}
	return ret;
}
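
/*
 * Leaf layout reminder (a simplified sketch, see the checks below):
 *
 *	[header][item 0][item 1]..[item N]  ..free space..  [data N]..[data 1][data 0]
 *
 * The fixed-size item array grows towards the end of the block while the
 * item data grows from the end towards the front, so item i's data must
 * end exactly where item (i - 1)'s data starts.
 */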
static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	/* No valid key type is 0, so all keys should be larger than this one */
	struct btrfs_key prev_key = {0, 0, 0};
	struct btrfs_key key;
	u32 nritems = btrfs_header_nritems(leaf);
	int slot;

	if (btrfs_header_level(leaf) != 0) {
		generic_err(leaf, 0,
			"invalid level for leaf, have %d expect 0",
			btrfs_header_level(leaf));
		return -EUCLEAN;
	}

	/*
	 * Extent buffers from a relocation tree have an owner field that
	 * corresponds to the subvolume tree they are based on. So just from
	 * an extent buffer alone we cannot tell which subvolume tree it
	 * belongs to, and therefore cannot tell whether the buffer is the
	 * root of a relocation tree. Skip this check for relocation trees.
	 */
	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
		u64 owner = btrfs_header_owner(leaf);

		/* These trees must never be empty */
		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
		    owner == BTRFS_DEV_TREE_OBJECTID ||
		    owner == BTRFS_FS_TREE_OBJECTID ||
		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
			generic_err(leaf, 0,
			"invalid root, root %llu must never be empty",
				    owner);
			return -EUCLEAN;
		}
		/* Unknown tree */
		if (owner == 0) {
			generic_err(leaf, 0,
				"invalid owner, root 0 is not defined");
			return -EUCLEAN;
		}
		return 0;
	}

	if (nritems == 0)
		return 0;

	/*
	 * Check the following things to make sure this is a good leaf, and
	 * leaf users won't need to bother with similar sanity checks:
	 *
	 * 1) key ordering
	 * 2) item offset and size
	 *    No overlap, no hole, all inside the leaf.
	 * 3) item content
	 *    If possible, do comprehensive sanity check.
	 *    NOTE: All checks must only rely on the item data itself.
	 */
	for (slot = 0; slot < nritems; slot++) {
		u32 item_end_expected;
		int ret;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* Make sure the keys are in the right order */
		if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
			generic_err(leaf, slot,
	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
				prev_key.objectid, prev_key.type,
				prev_key.offset, key.objectid, key.type,
				key.offset);
			return -EUCLEAN;
		}

		/*
		 * Make sure the offset and ends are right, remember that the
		 * item data starts at the end of the leaf and grows towards
		 * the front.
		 */
		if (slot == 0)
			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
		else
			item_end_expected = btrfs_item_offset_nr(leaf,
								 slot - 1);
		if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
			generic_err(leaf, slot,
				"unexpected item end, have %u expect %u",
				btrfs_item_end_nr(leaf, slot),
				item_end_expected);
			return -EUCLEAN;
		}

		/*
		 * Make sure we don't point outside of the leaf, just in case
		 * all the items are consistent with each other but all point
		 * outside of the leaf.
		 */
		if (btrfs_item_end_nr(leaf, slot) >
		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
			generic_err(leaf, slot,
			"slot end outside of leaf, have %u expect range [0, %u]",
				btrfs_item_end_nr(leaf, slot),
				BTRFS_LEAF_DATA_SIZE(fs_info));
			return -EUCLEAN;
		}

		/* Also check if the item data overlaps with the btrfs_item. */
		if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
		    btrfs_item_ptr_offset(leaf, slot)) {
			generic_err(leaf, slot,
		"slot overlaps with its data, item end %lu data start %lu",
				btrfs_item_nr_offset(slot) +
				sizeof(struct btrfs_item),
				btrfs_item_ptr_offset(leaf, slot));
			return -EUCLEAN;
		}

		if (check_item_data) {
			/*
			 * Check if the item size and content meet other
			 * criteria.
			 */
			ret = check_leaf_item(leaf, &key, slot, &prev_key);
			if (ret < 0)
				return ret;
		}

		prev_key.objectid = key.objectid;
		prev_key.type = key.type;
		prev_key.offset = key.offset;
	}

	return 0;
}
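
/*
 * The full check also validates item data, which is what read/write paths
 * want for blocks going to or coming from disk. The relaxed variant skips
 * check_leaf_item(), which is useful when item pointers have been set up
 * but the item data is not yet filled in (e.g. debug checks on a leaf that
 * is still being modified in memory).
 */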
int btrfs_check_leaf_full(struct extent_buffer *leaf)
{
	return check_leaf(leaf, true);
}
ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO);

int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
{
	return check_leaf(leaf, false);
}
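
/*
 * Node (internal) blocks hold only (key, blockptr, generation) triplets,
 * so the checks are simpler than for leaves: the level must be within
 * [1, BTRFS_MAX_LEVEL - 1], nritems within [1, BTRFS_NODEPTRS_PER_BLOCK()],
 * keys must be in strictly ascending order, and every block pointer must
 * be non-zero and aligned to the sector size.
 */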
int btrfs_check_node(struct extent_buffer *node)
{
	struct btrfs_fs_info *fs_info = node->fs_info;
	unsigned long nr = btrfs_header_nritems(node);
	struct btrfs_key key, next_key;
	int slot;
	int level = btrfs_header_level(node);
	u64 bytenr;
	int ret = 0;

	if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
		generic_err(node, 0,
			"invalid level for node, have %d expect [1, %d]",
			level, BTRFS_MAX_LEVEL - 1);
		return -EUCLEAN;
	}
	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
		btrfs_crit(fs_info,
"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
			   btrfs_header_owner(node), node->start,
			   nr == 0 ? "small" : "large", nr,
			   BTRFS_NODEPTRS_PER_BLOCK(fs_info));
		return -EUCLEAN;
	}

	for (slot = 0; slot < nr - 1; slot++) {
		bytenr = btrfs_node_blockptr(node, slot);
		btrfs_node_key_to_cpu(node, &key, slot);
		btrfs_node_key_to_cpu(node, &next_key, slot + 1);

		if (!bytenr) {
			generic_err(node, slot,
				"invalid NULL node pointer");
			ret = -EUCLEAN;
			goto out;
		}
		if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
			generic_err(node, slot,
			"unaligned pointer, have %llu should be aligned to %u",
				bytenr, fs_info->sectorsize);
			ret = -EUCLEAN;
			goto out;
		}

		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
			generic_err(node, slot,
			"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
				key.objectid, key.type, key.offset,
				next_key.objectid, next_key.type,
				next_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	return ret;
}
ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);
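
/*
 * Note: the ALLOW_ERROR_INJECTION() annotations above expose
 * btrfs_check_leaf_full() and btrfs_check_node() to the kernel's function
 * error injection framework (CONFIG_FUNCTION_ERROR_INJECTION), so test
 * tooling can force an errno return here and exercise the callers'
 * corruption handling without crafting a corrupt image.
 */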