// SPDX-License-Identifier: GPL-2.0-only
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * extent_map.c
 *
 * Block/Cluster mapping functions
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fiemap.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "super.h"
#include "symlink.h"
#include "aops.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

/*
 * The extent caching implementation is intentionally trivial.
 *
 * We only cache a small number of extents stored directly on the
 * inode, so linear order operations are acceptable. If we ever want
 * to increase the size of the extent map, then these algorithms must
 * get smarter.
 */

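/*
 * Initialize the in-memory extent map for an inode to the empty
 * state: no cached items, empty MRU list.
 */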
void ocfs2_extent_map_init(struct inode *inode)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	oi->ip_extent_map.em_num_items = 0;
	INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
}

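/*
 * Find the extent map item covering cpos. Must be called with
 * ip_lock held. On a hit, the item is moved to the front of em_list,
 * keeping the list in most-recently-used order; *ret_emi is left
 * NULL on a miss.
 */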
static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
				      unsigned int cpos,
				      struct ocfs2_extent_map_item **ret_emi)
{
	unsigned int range;
	struct ocfs2_extent_map_item *emi;

	*ret_emi = NULL;

	list_for_each_entry(emi, &em->em_list, ei_list) {
		range = emi->ei_cpos + emi->ei_clusters;

		if (cpos >= emi->ei_cpos && cpos < range) {
			list_move(&emi->ei_list, &em->em_list);

			*ret_emi = emi;
			break;
		}
	}
}

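/*
 * Translate cpos through the cached extent map. On a hit, returns 0
 * and fills *phys (and optionally *len and *flags); returns -ENOENT
 * on a miss.
 */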
static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
				   unsigned int *phys, unsigned int *len,
				   unsigned int *flags)
{
	unsigned int coff;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map_item *emi;

	spin_lock(&oi->ip_lock);

	__ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
	if (emi) {
		coff = cpos - emi->ei_cpos;
		*phys = emi->ei_phys + coff;
		if (len)
			*len = emi->ei_clusters - coff;
		if (flags)
			*flags = emi->ei_flags;
	}

	spin_unlock(&oi->ip_lock);

	if (emi == NULL)
		return -ENOENT;

	return 0;
}

/*
 * Forget about all clusters equal to or greater than cpos.
 */
void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
{
	struct ocfs2_extent_map_item *emi, *n;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	LIST_HEAD(tmp_list);
	unsigned int range;

	spin_lock(&oi->ip_lock);
	list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
		if (emi->ei_cpos >= cpos) {
			/* Full truncate of this record. */
			list_move(&emi->ei_list, &tmp_list);
			BUG_ON(em->em_num_items == 0);
			em->em_num_items--;
			continue;
		}

		range = emi->ei_cpos + emi->ei_clusters;
		if (range > cpos) {
			/* Partial truncate */
			emi->ei_clusters = cpos - emi->ei_cpos;
		}
	}
	spin_unlock(&oi->ip_lock);

	list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
		list_del(&emi->ei_list);
		kfree(emi);
	}
}

/*
 * Is any part of emi2 contained within emi1?
 */
static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
				 struct ocfs2_extent_map_item *emi2)
{
	unsigned int range1, range2;

	/*
	 * Check if logical start of emi2 is inside emi1
	 */
	range1 = emi1->ei_cpos + emi1->ei_clusters;
	if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
		return 1;

	/*
	 * Check if logical end of emi2 is inside emi1
	 */
	range2 = emi2->ei_cpos + emi2->ei_clusters;
	if (range2 > emi1->ei_cpos && range2 <= range1)
		return 1;

	return 0;
}

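/*
 * Copy the mapping fields (logical start, physical start, length and
 * flags) from src to dest, leaving dest's list linkage untouched.
 */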
static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
				  struct ocfs2_extent_map_item *src)
{
	dest->ei_cpos = src->ei_cpos;
	dest->ei_phys = src->ei_phys;
	dest->ei_clusters = src->ei_clusters;
	dest->ei_flags = src->ei_flags;
}

/*
 * Try to merge emi with ins. Returns 1 if merge succeeds, zero
 * otherwise.
 */
static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
					 struct ocfs2_extent_map_item *ins)
{
	/*
	 * Handle contiguousness
	 */
	if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
	    ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
	    ins->ei_flags == emi->ei_flags) {
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
		   (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
		   ins->ei_flags == emi->ei_flags) {
		emi->ei_phys = ins->ei_phys;
		emi->ei_cpos = ins->ei_cpos;
		emi->ei_clusters += ins->ei_clusters;
		return 1;
	}

	/*
	 * Overlapping extents - this shouldn't happen unless we've
	 * split an extent to change its flags. That is exceedingly
	 * rare, so there's no sense in trying to optimize it yet.
	 */
	if (ocfs2_ei_is_contained(emi, ins) ||
	    ocfs2_ei_is_contained(ins, emi)) {
		ocfs2_copy_emi_fields(emi, ins);
		return 1;
	}

	/* No merge was possible. */
	return 0;
}

/*
 * In order to reduce complexity on the caller, this insert function
 * is intentionally liberal in what it will accept.
 *
 * The only rule is that the truncate call *must* be used whenever
 * records have been deleted. This avoids inserting overlapping
 * records with different physical mappings.
 */
void ocfs2_extent_map_insert_rec(struct inode *inode,
				 struct ocfs2_extent_rec *rec)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_extent_map *em = &oi->ip_extent_map;
	struct ocfs2_extent_map_item *emi, *new_emi = NULL;
	struct ocfs2_extent_map_item ins;

	ins.ei_cpos = le32_to_cpu(rec->e_cpos);
	ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
					       le64_to_cpu(rec->e_blkno));
	ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
	ins.ei_flags = rec->e_flags;

search:
	spin_lock(&oi->ip_lock);

	list_for_each_entry(emi, &em->em_list, ei_list) {
		if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
			list_move(&emi->ei_list, &em->em_list);
			spin_unlock(&oi->ip_lock);
			goto out;
		}
	}

	/*
	 * No item could be merged.
	 *
	 * Either allocate and add a new item, or overwrite the least
	 * recently used one.
	 */
	if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
		if (new_emi == NULL) {
			spin_unlock(&oi->ip_lock);

			new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
			if (new_emi == NULL)
				goto out;

			goto search;
		}

		ocfs2_copy_emi_fields(new_emi, &ins);
		list_add(&new_emi->ei_list, &em->em_list);
		em->em_num_items++;
		new_emi = NULL;
	} else {
		BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
		emi = list_entry(em->em_list.prev,
				 struct ocfs2_extent_map_item, ei_list);
		list_move(&emi->ei_list, &em->em_list);
		ocfs2_copy_emi_fields(emi, &ins);
	}

	spin_unlock(&oi->ip_lock);

out:
	kfree(new_emi);
}

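/*
 * Read the inode's rightmost leaf block and return 1 if it contains
 * no extent records (or only a single empty record), 0 if it holds
 * real extents, or a negative error code on failure.
 */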
static int ocfs2_last_eb_is_empty(struct inode *inode,
				  struct ocfs2_dinode *di)
{
	int ret, next_free;
	u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk);
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;

	ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	eb = (struct ocfs2_extent_block *) eb_bh->b_data;
	el = &eb->h_list;

	if (el->l_tree_depth) {
		ocfs2_error(inode->i_sb,
			    "Inode %lu has non zero tree depth in leaf block %llu\n",
			    inode->i_ino,
			    (unsigned long long)eb_bh->b_blocknr);
		ret = -EROFS;
		goto out;
	}

	next_free = le16_to_cpu(el->l_next_free_rec);

	if (next_free == 0 ||
	    (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0])))
		ret = 1;

out:
	brelse(eb_bh);
	return ret;
}

/*
 * Return the first index within el which contains an extent start
 * larger than v_cluster.
 */
static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
				       u32 v_cluster)
{
	int i;
	struct ocfs2_extent_rec *rec;

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		if (v_cluster < le32_to_cpu(rec->e_cpos))
			break;
	}

	return i;
}

/*
 * Figure out the size of a hole which starts at v_cluster within the given
 * extent list.
 *
 * If there is no more allocation past v_cluster, we return the maximum
 * possible cluster offset (UINT_MAX) minus v_cluster.
 *
 * If we have in-inode extents, then el points to the dinode list and
 * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
 * containing el.
 */
int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
			       struct ocfs2_extent_list *el,
			       struct buffer_head *eb_bh,
			       u32 v_cluster,
			       u32 *num_clusters)
{
	int ret, i;
	struct buffer_head *next_eb_bh = NULL;
	struct ocfs2_extent_block *eb, *next_eb;

	i = ocfs2_search_for_hole_index(el, v_cluster);

	if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
		eb = (struct ocfs2_extent_block *)eb_bh->b_data;

		/*
		 * Check the next leaf for any extents.
		 */
		if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
			goto no_more_extents;

		ret = ocfs2_read_extent_block(ci,
					      le64_to_cpu(eb->h_next_leaf_blk),
					      &next_eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
		el = &next_eb->h_list;
		i = ocfs2_search_for_hole_index(el, v_cluster);
	}

no_more_extents:
	if (i == le16_to_cpu(el->l_next_free_rec)) {
		/*
		 * We're at the end of our existing allocation. Just
		 * return the maximum number of clusters we could
		 * possibly allocate.
		 */
		*num_clusters = UINT_MAX - v_cluster;
	} else {
		*num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
	}

	ret = 0;
out:
	brelse(next_eb_bh);
	return ret;
}

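/*
 * Walk the on-disk extent list, bypassing the in-memory extent map
 * cache, to find the record covering v_cluster. On success, *ret_rec
 * holds the record (zeroed for a hole), *hole_len (if non-NULL) the
 * length of any hole found, and *is_last (if non-NULL) is set when
 * the record is the inode's final extent.
 */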
static int ocfs2_get_clusters_nocache(struct inode *inode,
				      struct buffer_head *di_bh,
				      u32 v_cluster, unsigned int *hole_len,
				      struct ocfs2_extent_rec *ret_rec,
				      unsigned int *is_last)
{
	int i, ret, tree_height, len;
	struct ocfs2_dinode *di;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;
	struct buffer_head *eb_bh = NULL;

	memset(ret_rec, 0, sizeof(*ret_rec));
	if (is_last)
		*is_last = 0;

	di = (struct ocfs2_dinode *) di_bh->b_data;
	el = &di->id2.i_list;
	tree_height = le16_to_cpu(el->l_tree_depth);

	if (tree_height > 0) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
				      &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in leaf block %llu\n",
				    inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		/*
		 * Holes can be larger than the maximum size of an
		 * extent, so we return their lengths in a separate
		 * field.
		 */
		if (hole_len) {
			ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode),
							 el, eb_bh,
							 v_cluster, &len);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			*hole_len = len;
		}
		goto out_hole;
	}

	rec = &el->l_recs[i];

	BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

	if (!rec->e_blkno) {
		ocfs2_error(inode->i_sb,
			    "Inode %lu has bad extent record (%u, %u, 0)\n",
			    inode->i_ino,
			    le32_to_cpu(rec->e_cpos),
			    ocfs2_rec_clusters(el, rec));
		ret = -EROFS;
		goto out;
	}

	*ret_rec = *rec;

	/*
	 * Checking for last extent is potentially expensive - we
	 * might have to look at the next leaf over to see if it's
	 * empty.
	 *
	 * The first two checks are to see whether the caller even
	 * cares for this information, and if the extent is at least
	 * the last in its list.
	 *
	 * If those hold true, then the extent is last if any of the
	 * additional conditions hold true:
	 *  - Extent list is in-inode
	 *  - Extent list is right-most
	 *  - Extent list is 2nd to rightmost, with empty right-most
	 */
	if (is_last) {
		if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) {
			if (tree_height == 0)
				*is_last = 1;
			else if (eb->h_blkno == di->i_last_eb_blk)
				*is_last = 1;
			else if (eb->h_next_leaf_blk == di->i_last_eb_blk) {
				ret = ocfs2_last_eb_is_empty(inode, di);
				if (ret < 0) {
					mlog_errno(ret);
					goto out;
				}
				if (ret == 1)
					*is_last = 1;
			}
		}
	}

out_hole:
	ret = 0;
out:
	brelse(eb_bh);
	return ret;
}

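/*
 * Convert an extent record into a (physical cluster, length) pair
 * relative to v_cluster: offset the record's physical start by
 * v_cluster's distance into the record, and shorten the length
 * accordingly.
 */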
static void ocfs2_relative_extent_offsets(struct super_block *sb,
					  u32 v_cluster,
					  struct ocfs2_extent_rec *rec,
					  u32 *p_cluster, u32 *num_clusters)
{
	u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);

	*p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno));
	*p_cluster = *p_cluster + coff;

	if (num_clusters)
		*num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff;
}

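/*
 * Map a virtual cluster in an xattr tree to its physical cluster.
 * This never touches the inode's extent map cache, and a failed
 * search is treated as corruption (-EROFS) since xattr trees are not
 * expected to contain holes.
 */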
int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
			     u32 *p_cluster, u32 *num_clusters,
			     struct ocfs2_extent_list *el,
			     unsigned int *extent_flags)
{
	int ret = 0, i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_rec *rec;
	u32 coff;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
				      &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in xattr leaf block %llu\n",
				    inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	i = ocfs2_search_extent_list(el, v_cluster);
	if (i == -1) {
		ret = -EROFS;
		mlog_errno(ret);
		goto out;
	} else {
		rec = &el->l_recs[i];
		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));

		if (!rec->e_blkno) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
				    inode->i_ino,
				    le32_to_cpu(rec->e_cpos),
				    ocfs2_rec_clusters(el, rec));
			ret = -EROFS;
			goto out;
		}
		coff = v_cluster - le32_to_cpu(rec->e_cpos);
		*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
						      le64_to_cpu(rec->e_blkno));
		*p_cluster = *p_cluster + coff;

		if (num_clusters)
			*num_clusters = ocfs2_rec_clusters(el, rec) - coff;

		if (extent_flags)
			*extent_flags = rec->e_flags;
	}
out:
	brelse(eb_bh);
	return ret;
}

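/*
 * Map v_cluster to its physical cluster, trying the in-memory extent
 * map first and falling back to the on-disk tree on a miss. A record
 * found on disk is inserted back into the map. For a hole, *p_cluster
 * is set to 0 and *num_clusters to the hole's length. Inline-data
 * inodes have no cluster mapping and return -ERANGE.
 */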
int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
		       u32 *p_cluster, u32 *num_clusters,
		       unsigned int *extent_flags)
{
	int ret;
	unsigned int hole_len, flags = 0;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = -ERANGE;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
				      num_clusters, extent_flags);
	if (ret == 0)
		goto out;

	ret = ocfs2_read_inode_block(inode, &di_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len,
					 &rec, NULL);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (rec.e_blkno == 0ULL) {
		/*
		 * A hole was found. Return some canned values that
		 * callers can key on. If asked for, num_clusters will
		 * be populated with the size of the hole.
		 */
		*p_cluster = 0;
		if (num_clusters) {
			*num_clusters = hole_len;
		}
	} else {
		ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec,
					      p_cluster, num_clusters);
		flags = rec.e_flags;

		ocfs2_extent_map_insert_rec(inode, &rec);
	}

	if (extent_flags)
		*extent_flags = flags;

out:
	brelse(di_bh);
	return ret;
}

/*
 * This expects alloc_sem to be held. The allocation cannot change at
 * all while the map is in the process of being updated.
 */
int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
				u64 *ret_count, unsigned int *extent_flags)
{
	int ret;
	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
	u32 cpos, num_clusters, p_cluster;
	u64 boff = 0;

	cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);

	ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
				 extent_flags);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * p_cluster == 0 indicates a hole.
	 */
	if (p_cluster) {
		boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
		boff += (v_blkno & (u64)(bpc - 1));
	}

	*p_blkno = boff;

	if (ret_count) {
		*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
		*ret_count -= v_blkno & (u64)(bpc - 1);
	}

out:
	return ret;
}

/*
 * The name ocfs2_fiemap_inline() is slightly misleading: it handles
 * fiemap not only for inline-data files but also for fast symlinks,
 * since the two are identical as far as extent mapping is concerned.
 */
static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
			       struct fiemap_extent_info *fieinfo,
			       u64 map_start)
{
	int ret;
	unsigned int id_count;
	struct ocfs2_dinode *di;
	u64 phys;
	u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	di = (struct ocfs2_dinode *)di_bh->b_data;
	if (ocfs2_inode_is_fast_symlink(inode))
		id_count = ocfs2_fast_symlink_chars(inode->i_sb);
	else
		id_count = le16_to_cpu(di->id2.i_data.id_count);

	if (map_start < id_count) {
		phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
		if (ocfs2_inode_is_fast_symlink(inode))
			phys += offsetof(struct ocfs2_dinode, id2.i_symlink);
		else
			phys += offsetof(struct ocfs2_dinode,
					 id2.i_data.id_data);

		ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
					      flags);
		if (ret < 0)
			return ret;
	}

	return 0;
}

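/*
 * Fill fieinfo with the extents backing the byte range [map_start,
 * map_start + map_len), walking the on-disk extent records under the
 * inode lock and ip_alloc_sem. Holes are skipped; unwritten and
 * refcounted extents are flagged accordingly.
 */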
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 map_start, u64 map_len)
{
	int ret, is_last;
	u32 mapping_end, cpos;
	unsigned int hole_size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u64 len_bytes, phys_bytes, virt_bytes;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	ret = fiemap_prep(inode, fieinfo, map_start, &map_len, 0);
	if (ret)
		return ret;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/*
	 * Handle inline-data and fast symlink separately.
	 */
	if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
	    ocfs2_inode_is_fast_symlink(inode)) {
		ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
		goto out_unlock;
	}

	cpos = map_start >> osb->s_clustersize_bits;
	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
					       map_start + map_len);
	is_last = 0;
	while (cpos < mapping_end && !is_last) {
		u32 fe_flags;

		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
						 &hole_size, &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}

		if (rec.e_blkno == 0ULL) {
			cpos += hole_size;
			continue;
		}

		fe_flags = 0;
		if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
			fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
		if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
			fe_flags |= FIEMAP_EXTENT_SHARED;
		if (is_last)
			fe_flags |= FIEMAP_EXTENT_LAST;
		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;

		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
					      len_bytes, fe_flags);
		if (ret)
			break;

		cpos = le32_to_cpu(rec.e_cpos) + le16_to_cpu(rec.e_leaf_clusters);
	}

	if (ret > 0)
		ret = 0;

out_unlock:
	brelse(di_bh);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_inode_unlock(inode, 0);

out:
	return ret;
}

/* Is IO overwriting allocated blocks? */
int ocfs2_overwrite_io(struct inode *inode, struct buffer_head *di_bh,
		       u64 map_start, u64 map_len)
{
	int ret = 0, is_last;
	u32 mapping_end, cpos;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_rec rec;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (ocfs2_size_fits_inline_data(di_bh, map_start + map_len))
			return ret;
		else
			return -EAGAIN;
	}

	cpos = map_start >> osb->s_clustersize_bits;
	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
					       map_start + map_len);
	is_last = 0;
	while (cpos < mapping_end && !is_last) {
		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
						 NULL, &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (rec.e_blkno == 0ULL)
			break;

		if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
			break;

		cpos = le32_to_cpu(rec.e_cpos) +
			le16_to_cpu(rec.e_leaf_clusters);
	}

	if (cpos < mapping_end)
		ret = -EAGAIN;

out:
	return ret;
}

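/*
 * Back end for lseek(2) SEEK_DATA/SEEK_HOLE: scan the extent records
 * from *offset forward and set *offset to the start of the next data
 * or hole region. Unwritten extents count as holes, and SEEK_HOLE
 * always finds the implicit hole at i_size. Returns -ENXIO when the
 * starting offset is past i_size or no matching region exists.
 */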
int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;
	unsigned int is_last = 0, is_data = 0;
	u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 cpos, cend, clen, hole_size;
	u64 extoff, extlen;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	BUG_ON(whence != SEEK_DATA && whence != SEEK_HOLE);

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	if (*offset >= i_size_read(inode)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (whence == SEEK_HOLE)
			*offset = i_size_read(inode);
		goto out_unlock;
	}

	clen = 0;
	cpos = *offset >> cs_bits;
	cend = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	while (cpos < cend && !is_last) {
		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
						 &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}

		extoff = cpos;
		extoff <<= cs_bits;

		if (rec.e_blkno == 0ULL) {
			clen = hole_size;
			is_data = 0;
		} else {
			clen = le16_to_cpu(rec.e_leaf_clusters) -
				(cpos - le32_to_cpu(rec.e_cpos));
			is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
		}

		if ((!is_data && whence == SEEK_HOLE) ||
		    (is_data && whence == SEEK_DATA)) {
			if (extoff > *offset)
				*offset = extoff;
			goto out_unlock;
		}

		if (!is_last)
			cpos += clen;
	}

	if (whence == SEEK_HOLE) {
		extoff = cpos;
		extoff <<= cs_bits;
		extlen = clen;
		extlen <<= cs_bits;

		if ((extoff + extlen) > i_size_read(inode))
			extlen = i_size_read(inode) - extoff;
		extoff += extlen;
		if (extoff > *offset)
			*offset = extoff;
		goto out_unlock;
	}

	ret = -ENXIO;

out_unlock:
	brelse(di_bh);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_inode_unlock(inode, 0);

out:
	return ret;
}

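/*
 * Read nr blocks starting at virtual block v_block, translating each
 * physically contiguous run through the extent map and reading it in
 * one call. Ranges extending past i_size are only legal for
 * readahead and are silently dropped; a hole within the range is
 * reported as -EIO.
 */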
int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
			   struct buffer_head *bhs[], int flags,
			   int (*validate)(struct super_block *sb,
					   struct buffer_head *bh))
{
	int rc = 0;
	u64 p_block, p_count;
	int i, count, done = 0;

	trace_ocfs2_read_virt_blocks(
	     inode, (unsigned long long)v_block, nr, bhs, flags,
	     validate);

	if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
	    i_size_read(inode)) {
		BUG_ON(!(flags & OCFS2_BH_READAHEAD));
		goto out;
	}

	while (done < nr) {
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
		rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
						 &p_block, &p_count, NULL);
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		if (rc) {
			mlog_errno(rc);
			break;
		}

		if (!p_block) {
			rc = -EIO;
			mlog(ML_ERROR,
			     "Inode #%llu contains a hole at offset %llu\n",
			     (unsigned long long)OCFS2_I(inode)->ip_blkno,
			     (unsigned long long)(v_block + done) <<
			     inode->i_sb->s_blocksize_bits);
			break;
		}

		count = nr - done;
		if (p_count < count)
			count = p_count;

		/*
		 * If the caller passed us bhs, they should have come
		 * from a previous readahead call to this function. Thus,
		 * they should have the right b_blocknr.
		 */
		for (i = 0; i < count; i++) {
			if (!bhs[done + i])
				continue;
			BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
		}

		rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count,
				       bhs + done, flags, validate);
		if (rc) {
			mlog_errno(rc);
			break;
		}

		done += count;
	}

out:
	return rc;
}