// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

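/*
 * Per-open-file private data.  Each file descriptor gets an
 * ocfs2_file_private carrying a cluster lock resource (fp_flock) used
 * to implement cluster-aware flock(); it is torn down on release.
 */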
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp;

        fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        fp->fp_file = file;
        mutex_init(&fp->fp_mutex);
        ocfs2_file_lock_res_init(&fp->fp_flock, fp);
        file->private_data = fp;

        return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (fp) {
                ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
                ocfs2_lock_res_free(&fp->fp_flock);
                kfree(fp);
                file->private_data = NULL;
        }
}

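/*
 * Open bumps ip_open_count under ip_lock, but only after checking that
 * another node has not already wiped the inode from disk
 * (OCFS2_INODE_DELETED).  O_DIRECT openers are additionally flagged
 * via OCFS2_INODE_OPEN_DIRECT.
 */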
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
        int status;
        int mode = file->f_flags;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        trace_ocfs2_file_open(inode, file, file->f_path.dentry,
                              (unsigned long long)oi->ip_blkno,
                              file->f_path.dentry->d_name.len,
                              file->f_path.dentry->d_name.name, mode);

        if (file->f_mode & FMODE_WRITE) {
                status = dquot_initialize(inode);
                if (status)
                        goto leave;
        }

        spin_lock(&oi->ip_lock);

        /* Check that the inode hasn't been wiped from disk by another
         * node. If it hasn't then we're safe as long as we hold the
         * spin lock until our increment of open count. */
        if (oi->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&oi->ip_lock);

                status = -ENOENT;
                goto leave;
        }

        if (mode & O_DIRECT)
                oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

        oi->ip_open_count++;
        spin_unlock(&oi->ip_lock);

        status = ocfs2_init_file_private(inode, file);
        if (status) {
                /*
                 * We want to set open count back if we're failing the
                 * open.
                 */
                spin_lock(&oi->ip_lock);
                oi->ip_open_count--;
                spin_unlock(&oi->ip_lock);
        }

        file->f_mode |= FMODE_NOWAIT;

leave:
        return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        spin_lock(&oi->ip_lock);
        if (!--oi->ip_open_count)
                oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

        trace_ocfs2_file_release(inode, file, file->f_path.dentry,
                                 oi->ip_blkno,
                                 file->f_path.dentry->d_name.len,
                                 file->f_path.dentry->d_name.name,
                                 oi->ip_open_count);
        spin_unlock(&oi->ip_lock);

        ocfs2_free_file_private(inode, file);

        return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
        return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
        ocfs2_free_file_private(inode, file);
        return 0;
}

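/*
 * fsync/fdatasync: write and wait on the dirty range, then make sure
 * the jbd2 transaction recorded in i_sync_tid (or i_datasync_tid for
 * datasync) has committed.  If the journal will not issue a cache
 * flush for that transaction itself, issue one here.
 */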
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
                           int datasync)
{
        int err = 0;
        struct inode *inode = file->f_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        journal_t *journal = osb->journal->j_journal;
        int ret;
        tid_t commit_tid;
        bool needs_barrier = false;

        trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
                              oi->ip_blkno,
                              file->f_path.dentry->d_name.len,
                              file->f_path.dentry->d_name.name,
                              (unsigned long long)datasync);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return -EROFS;

        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_trans_will_send_data_barrier(journal, commit_tid))
                needs_barrier = true;
        err = jbd2_complete_transaction(journal, commit_tid);
        if (needs_barrier) {
                ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
                if (!err)
                        err = ret;
        }

        if (err)
                mlog_errno(err);

        return (err < 0) ? -EIO : 0;
}

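/*
 * Decide whether an atime update is worth a disk write.  On top of the
 * usual noatime/nodiratime/relatime mount checks, ocfs2 batches atime
 * updates: the on-disk value is only refreshed once s_atime_quantum
 * seconds have passed since the last update.
 */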
int ocfs2_should_update_atime(struct inode *inode,
                              struct vfsmount *vfsmnt)
{
        struct timespec64 now;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return 0;

        if ((inode->i_flags & S_NOATIME) ||
            ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        /*
         * We can be called with no vfsmnt structure - NFSD will
         * sometimes do this.
         *
         * Note that our action here is different than touch_atime() -
         * if we can't tell whether this is a noatime mount, then we
         * don't know whether to trust the value of s_atime_quantum.
         */
        if (vfsmnt == NULL)
                return 0;

        if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
            ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        if (vfsmnt->mnt_flags & MNT_RELATIME) {
                if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
                    (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
                        return 1;

                return 0;
        }

        now = current_time(inode);
        if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
                return 0;
        else
                return 1;
}

int ocfs2_update_inode_atime(struct inode *inode,
                             struct buffer_head *bh)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * Don't use ocfs2_mark_inode_dirty() here as we don't always
         * have i_mutex to guard against concurrent changes to other
         * inode fields.
         */
        inode->i_atime = current_time(inode);
        di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ocfs2_update_inode_fsync_trans(handle, inode, 0);
        ocfs2_journal_dirty(handle, bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

int ocfs2_set_inode_size(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *fe_bh,
                         u64 new_i_size)
{
        int status;

        i_size_write(inode, new_i_size);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        inode->i_ctime = inode->i_mtime = current_time(inode);

        status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        return status;
}

int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_set_inode_size(handle, inode, di_bh,
                                   new_i_size);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_update_inode_fsync_trans(handle, inode, 0);
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

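/*
 * A quick worked example of the cluster arithmetic used below,
 * assuming (purely for illustration) a 1 MB cluster size:
 * s_clustersize_bits is 20, so offset 0x180000 (1.5 MB) gives
 * cpos = offset >> 20 = 1, and offset & (s_clustersize - 1) is
 * nonzero, i.e. the offset is not cluster aligned.
 */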
static int ocfs2_cow_file_pos(struct inode *inode,
                              struct buffer_head *fe_bh,
                              u64 offset)
{
        int status;
        u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        /*
         * If the new offset is aligned to the range of the cluster, there is
         * no space for ocfs2_zero_range_for_truncate to fill, so no need to
         * CoW either.
         */
        if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
                return 0;

        status = ocfs2_get_clusters(inode, cpos, &phys,
                                    &num_clusters, &ext_flags);
        if (status) {
                mlog_errno(status);
                goto out;
        }

        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;

        return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);

out:
        return status;
}

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     struct buffer_head *fe_bh,
                                     u64 new_i_size)
{
        int status;
        handle_t *handle;
        struct ocfs2_dinode *di;
        u64 cluster_bytes;

        /*
         * We need to CoW the cluster containing the offset if it is reflinked
         * since we will call ocfs2_zero_range_for_truncate later which will
         * write "0" from offset to the end of the cluster.
         */
        status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
        if (status) {
                mlog_errno(status);
                return status;
        }

        /* TODO: This needs to actually orphan the inode in this
         * transaction. */
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto out_commit;
        }

        /*
         * Do this before setting i_size.
         */
        cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
        status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
                                               cluster_bytes);
        if (status) {
                mlog_errno(status);
                goto out_commit;
        }

        i_size_write(inode, new_i_size);
        inode->i_ctime = inode->i_mtime = current_time(inode);

        di = (struct ocfs2_dinode *) fe_bh->b_data;
        di->i_size = cpu_to_le64(new_i_size);
        di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
        di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        ocfs2_update_inode_fsync_trans(handle, inode, 0);

        ocfs2_journal_dirty(handle, fe_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

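/*
 * Size-reducing truncate.  Inline-data inodes are truncated in place;
 * otherwise ocfs2_orphan_for_truncate() zeroes the tail cluster and
 * records the new i_size, the page cache is trimmed, and
 * ocfs2_commit_truncate() releases the allocation past new_i_size.
 */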
int ocfs2_truncate_file(struct inode *inode,
                        struct buffer_head *di_bh,
                        u64 new_i_size)
{
        int status = 0;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        /* We trust di_bh because it comes from ocfs2_inode_lock(), which
         * already validated it */
        fe = (struct ocfs2_dinode *) di_bh->b_data;

        trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                  (unsigned long long)le64_to_cpu(fe->i_size),
                                  (unsigned long long)new_i_size);

        mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
                        "Inode %llu, inode i_size = %lld != di "
                        "i_size = %llu, i_flags = 0x%x\n",
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        i_size_read(inode),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        le32_to_cpu(fe->i_flags));

        if (new_i_size > le64_to_cpu(fe->i_size)) {
                trace_ocfs2_truncate_file_error(
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        (unsigned long long)new_i_size);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ocfs2_resv_discard(&osb->osb_la_resmap,
                           &OCFS2_I(inode)->ip_la_data_resv);

        /*
         * The inode lock forced other nodes to sync and drop their
         * pages, which (correctly) happens even if we have a truncate
         * without allocation change - ocfs2 cluster sizes can be much
         * greater than page size, so we have to truncate them
         * anyway.
         */

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                unmap_mapping_range(inode->i_mapping,
                                    new_i_size + PAGE_SIZE - 1, 0, 1);
                truncate_inode_pages(inode->i_mapping, new_i_size);
                status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
                                               i_size_read(inode), 1);
                if (status)
                        mlog_errno(status);

                goto bail_unlock_sem;
        }

        /* alright, we're going to need to do a full blown alloc size
         * change. Orphan the inode so that recovery can complete the
         * truncate if necessary. This does the task of marking
         * i_size. */
        status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(inode->i_mapping, new_i_size);

        status = ocfs2_commit_truncate(osb, inode, di_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        /* TODO: orphan dir cleanup here. */
bail_unlock_sem:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
        if (!status && OCFS2_I(inode)->ip_clusters == 0)
                status = ocfs2_try_remove_refcount_tree(inode, di_bh);

        return status;
}

/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
                         struct inode *inode,
                         u32 *logical_offset,
                         u32 clusters_to_add,
                         int mark_unwritten,
                         struct buffer_head *fe_bh,
                         handle_t *handle,
                         struct ocfs2_alloc_context *data_ac,
                         struct ocfs2_alloc_context *meta_ac,
                         enum ocfs2_alloc_restarted *reason_ret)
{
        int ret;
        struct ocfs2_extent_tree et;

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
        ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
                                          clusters_to_add, mark_unwritten,
                                          data_ac, meta_ac, reason_ret);

        return ret;
}

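/*
 * Do the real work of growing the allocation: reserve quota and the
 * data/metadata allocators, start a transaction, and call into the
 * btree code.  A RESTART_META answer re-reserves everything from
 * scratch (restart_all); RESTART_TRANS just extends the running
 * transaction and retries.
 */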
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
                                   u32 clusters_to_add, int mark_unwritten)
{
        int status = 0;
        int restart_func = 0;
        int credits;
        u32 prev_clusters;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why = RESTART_NONE;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_extent_tree et;
        int did_quota = 0;

        /*
         * Unwritten extents only exist for file systems which
         * support holes.
         */
        BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
        status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
                                       &data_ac, &meta_ac);
        if (status) {
                mlog_errno(status);
                goto leave;
        }

        credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
                goto leave;
        }

restarted_transaction:
        trace_ocfs2_extend_allocation(
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                (unsigned long long)i_size_read(inode),
                le32_to_cpu(fe->i_clusters), clusters_to_add,
                why, restart_func);

        status = dquot_alloc_space_nodirty(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (status)
                goto leave;
        did_quota = 1;

        /* reserve a write to the file entry early on - that way if we
         * run out of credits in the allocation path, we can still
         * update i_size. */
        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        prev_clusters = OCFS2_I(inode)->ip_clusters;

        status = ocfs2_add_inode_data(osb,
                                      inode,
                                      &logical_start,
                                      clusters_to_add,
                                      mark_unwritten,
                                      bh,
                                      handle,
                                      data_ac,
                                      meta_ac,
                                      &why);
        if ((status < 0) && (status != -EAGAIN)) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }
        ocfs2_update_inode_fsync_trans(handle, inode, 1);
        ocfs2_journal_dirty(handle, bh);

        spin_lock(&OCFS2_I(inode)->ip_lock);
        clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
        spin_unlock(&OCFS2_I(inode)->ip_lock);
        /* Release unused quota reservation */
        dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        did_quota = 0;

        if (why != RESTART_NONE && clusters_to_add) {
                if (why == RESTART_META) {
                        restart_func = 1;
                        status = 0;
                } else {
                        BUG_ON(why != RESTART_TRANS);

                        status = ocfs2_allocate_extend_trans(handle, 1);
                        if (status < 0) {
                                /* handle still has to be committed at
                                 * this point. */
                                status = -ENOMEM;
                                mlog_errno(status);
                                goto leave;
                        }
                        goto restarted_transaction;
                }
        }

        trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
             le32_to_cpu(fe->i_clusters),
             (unsigned long long)le64_to_cpu(fe->i_size),
             OCFS2_I(inode)->ip_clusters,
             (unsigned long long)i_size_read(inode));

leave:
        if (status < 0 && did_quota)
                dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (handle) {
                ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
                ocfs2_free_alloc_context(data_ac);
                data_ac = NULL;
        }
        if (meta_ac) {
                ocfs2_free_alloc_context(meta_ac);
                meta_ac = NULL;
        }
        if ((!status) && restart_func) {
                restart_func = 0;
                goto restart_all;
        }
        brelse(bh);
        bh = NULL;

        return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
                                                      struct buffer_head *di_bh,
                                                      loff_t start_byte,
                                                      loff_t length)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;
        int ret = 0;

        if (!ocfs2_should_order_data(inode))
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret)
                mlog_errno(ret);
        ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
        if (ret) {
                if (!IS_ERR(handle))
                        ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
                                 u64 abs_to, struct buffer_head *di_bh)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index = abs_from >> PAGE_SHIFT;
        handle_t *handle;
        int ret = 0;
        unsigned zero_from, zero_to, block_start, block_end;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        BUG_ON(abs_from >= abs_to);
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));

        handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
                                                      abs_from,
                                                      abs_to - abs_from);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out_commit_trans;
        }

        /* Get the offsets within the page that we want to zero */
        zero_from = abs_from & (PAGE_SIZE - 1);
        zero_to = abs_to & (PAGE_SIZE - 1);
        if (!zero_to)
                zero_to = PAGE_SIZE;

        trace_ocfs2_write_zero_page(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)abs_from,
                        (unsigned long long)abs_to,
                        index, zero_from, zero_to);

        /* We know that zero_from is block aligned */
        for (block_start = zero_from; block_start < zero_to;
             block_start = block_end) {
                block_end = block_start + i_blocksize(inode);

                /*
                 * block_start is block-aligned.  Bump it by one to force
                 * __block_write_begin and block_commit_write to zero the
                 * whole block.
                 */
                ret = __block_write_begin(page, block_start + 1, 0,
                                          ocfs2_get_block);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
                }

                /* must not update i_size! */
                ret = block_commit_write(page, block_start + 1,
                                         block_start + 1);
                if (ret < 0)
                        mlog_errno(ret);
                else
                        ret = 0;
        }

        /*
         * fs-writeback releases dirty pages beyond inode size without
         * taking the page lock; that release happens in
         * block_write_full_page().
         */
        i_size_write(inode, abs_to);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        di->i_size = cpu_to_le64((u64)i_size_read(inode));
        inode->i_mtime = inode->i_ctime = current_time(inode);
        di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
        di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        di->i_mtime_nsec = di->i_ctime_nsec;
        if (handle) {
                ocfs2_journal_dirty(handle, di_bh);
                ocfs2_update_inode_fsync_trans(handle, inode, 1);
        }

out_unlock:
        unlock_page(page);
        put_page(page);
out_commit_trans:
        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        return ret;
}

/*
 * Find the next range to zero.  We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache.  We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed.  range_start and range_end return the next zeroing
 * range.  A subsequent call should pass the previous range_end as its
 * zero_start.  If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over.  Refcounted extents are CoW'd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
                                       struct buffer_head *di_bh,
                                       u64 zero_start, u64 zero_end,
                                       u64 *range_start, u64 *range_end)
{
        int rc = 0, needs_cow = 0;
        u32 p_cpos, zero_clusters = 0;
        u32 zero_cpos =
                zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        while (zero_cpos < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
                                        &num_clusters, &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                        zero_clusters = num_clusters;
                        if (ext_flags & OCFS2_EXT_REFCOUNTED)
                                needs_cow = 1;
                        break;
                }

                zero_cpos += num_clusters;
        }
        if (!zero_clusters) {
                *range_end = 0;
                goto out;
        }

        while ((zero_cpos + zero_clusters) < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
                                        &p_cpos, &num_clusters,
                                        &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
                        break;
                if (ext_flags & OCFS2_EXT_REFCOUNTED)
                        needs_cow = 1;
                zero_clusters += num_clusters;
        }
        if ((zero_cpos + zero_clusters) > last_cpos)
                zero_clusters = last_cpos - zero_cpos;

        if (needs_cow) {
                rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
                                        zero_clusters, UINT_MAX);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }
        }

        *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
        *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
                                             zero_cpos + zero_clusters);

out:
        return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range().  The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
                                   u64 range_end, struct buffer_head *di_bh)
{
        int rc = 0;
        u64 next_pos;
        u64 zero_pos = range_start;

        trace_ocfs2_zero_extend_range(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)range_start,
                        (unsigned long long)range_end);

        BUG_ON(range_start >= range_end);

        while (zero_pos < range_end) {
                next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
                if (next_pos > range_end)
                        next_pos = range_end;
                rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
                if (rc < 0) {
                        mlog_errno(rc);
                        break;
                }
                zero_pos = next_pos;

                /*
                 * Very large extends have the potential to lock up
                 * the cpu for extended periods of time.
                 */
                cond_resched();
        }

        return rc;
}

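/*
 * Zero everything between the block-aligned i_size and zero_to_size,
 * one ocfs2_zero_extend_get_range() result at a time.  Holes and
 * unwritten extents are skipped, so only allocated, written clusters
 * get paged in and zeroed.
 */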
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
                      loff_t zero_to_size)
{
        int ret = 0;
        u64 zero_start, range_start = 0, range_end = 0;
        struct super_block *sb = inode->i_sb;

        zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
        trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                (unsigned long long)zero_start,
                                (unsigned long long)i_size_read(inode));
        while (zero_start < zero_to_size) {
                ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
                                                  zero_to_size,
                                                  &range_start,
                                                  &range_end);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                if (!range_end)
                        break;
                /* Trim the ends */
                if (range_start < zero_start)
                        range_start = zero_start;
                if (range_end > zero_to_size)
                        range_end = zero_to_size;

                ret = ocfs2_zero_extend_range(inode, range_start,
                                              range_end, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                zero_start = range_end;
        }

        return ret;
}

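/*
 * Extend without leaving holes: allocate written clusters out to
 * new_i_size, then zero from the old i_size up to zero_to.  Per the
 * BUG_ON()s below, quota (system-file) inodes may call this without a
 * dinode buffer head.
 */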
int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
                          u64 new_i_size, u64 zero_to)
{
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        /*
         * Only quota files call this without a bh, and they can't be
         * refcounted.
         */
        BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
        BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
        else
                clusters_to_add -= oi->ip_clusters;

        if (clusters_to_add) {
                ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
                                              clusters_to_add, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Call this even if we don't add any clusters to the tree. We
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
        ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

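/*
 * Grow a file to new_i_size.  Inline-data inodes stay inline while the
 * new size still fits, and are converted to extents otherwise.  Sparse
 * file systems only need tail zeroing; non-sparse ones go through
 * ocfs2_extend_no_holes().
 */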
static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        BUG_ON(!di_bh);

        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;

        if (i_size_read(inode) == new_i_size)
                goto out;
        BUG_ON(new_i_size < i_size_read(inode));

        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
         * here.  We even have to hold it for sparse files because there
         * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                /*
                 * We can optimize small extends by keeping the inode's
                 * data inline.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
                        up_write(&oi->ip_alloc_sem);
                        goto out_update_size;
                }

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
        else
                ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
                                            new_i_size);

        up_write(&oi->ip_alloc_sem);

        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

out_update_size:
        ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

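/*
 * ->setattr(): size changes are funneled through ocfs2_truncate_file()
 * or ocfs2_extend_file() under the rw cluster lock; for uid/gid changes
 * the dquot structures are looked up before the transaction starts,
 * per the lock-ordering note in the body below.
 */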
int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
        int status = 0, size_change;
        int inode_locked = 0;
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
        handle_t *handle = NULL;
        struct dquot *transfer_to[MAXQUOTAS] = { };
        int qtype;
        int had_lock;
        struct ocfs2_lock_holder oh;

        trace_ocfs2_setattr(inode, dentry,
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            dentry->d_name.len, dentry->d_name.name,
                            attr->ia_valid, attr->ia_mode,
                            from_kuid(&init_user_ns, attr->ia_uid),
                            from_kgid(&init_user_ns, attr->ia_gid));

        /* ensuring we don't even attempt to truncate a symlink */
        if (S_ISLNK(inode->i_mode))
                attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
                           | ATTR_GID | ATTR_UID | ATTR_MODE)
        if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
                return 0;

        status = setattr_prepare(dentry, attr);
        if (status)
                return status;

        if (is_quota_modification(inode, attr)) {
                status = dquot_initialize(inode);
                if (status)
                        return status;
        }
        size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
        if (size_change) {
                /*
                 * Here we should wait for dio to finish before taking the
                 * inode lock, to avoid a deadlock between ocfs2_setattr()
                 * and ocfs2_dio_end_io_write()
                 */
                inode_dio_wait(inode);

                status = ocfs2_rw_lock(inode, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
        if (had_lock < 0) {
                status = had_lock;
                goto bail_unlock_rw;
        } else if (had_lock) {
                /*
                 * As far as we know, ocfs2_setattr() could only be the first
                 * VFS entry point in the call chain of a recursive cluster
                 * locking issue.
                 *
                 * For instance:
                 * chmod_common()
                 *  notify_change()
                 *   ocfs2_setattr()
                 *    posix_acl_chmod()
                 *     ocfs2_iop_get_acl()
                 *
                 * But, we're not 100% sure if it's always true, because the
                 * ordering of the VFS entry points in the call chain is out
                 * of our control.  So, we'd better dump the stack here to
                 * catch the other cases of recursive locking.
                 */
                mlog(ML_ERROR, "Another case of recursive locking:\n");
                dump_stack();
        }
        inode_locked = 1;

        if (size_change) {
                status = inode_newsize_ok(inode, attr->ia_size);
                if (status)
                        goto bail_unlock;

                if (i_size_read(inode) >= attr->ia_size) {
                        if (ocfs2_should_order_data(inode)) {
                                status = ocfs2_begin_ordered_truncate(inode,
                                                                      attr->ia_size);
                                if (status)
                                        goto bail_unlock;
                        }
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                } else
                        status = ocfs2_extend_file(inode, bh, attr->ia_size);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        status = -ENOSPC;
                        goto bail_unlock;
                }
        }

        if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
            (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
                /*
                 * Gather pointers to quota structures so that allocation /
                 * freeing of quota structures happens here and not inside
                 * dquot_transfer() where we have problems with lock ordering
                 */
                if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
                        transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
                        if (IS_ERR(transfer_to[USRQUOTA])) {
                                status = PTR_ERR(transfer_to[USRQUOTA]);
                                transfer_to[USRQUOTA] = NULL;
                                goto bail_unlock;
                        }
                }
                if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
                        transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
                        if (IS_ERR(transfer_to[GRPQUOTA])) {
                                status = PTR_ERR(transfer_to[GRPQUOTA]);
                                transfer_to[GRPQUOTA] = NULL;
                                goto bail_unlock;
                        }
                }
                down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock_alloc;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
                down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock_alloc;
                }
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
                mlog_errno(status);

bail_commit:
        ocfs2_commit_trans(osb, handle);
bail_unlock_alloc:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail_unlock:
        if (status && inode_locked) {
                ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
                inode_locked = 0;
        }
bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
bail:
        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
                dqput(transfer_to[qtype]);

        if (!status && attr->ia_valid & ATTR_MODE) {
                status = ocfs2_acl_chmod(inode, bh);
                if (status < 0)
                        mlog_errno(status);
        }
        if (inode_locked)
                ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);

        brelse(bh);
        return status;
}

int ocfs2_getattr(const struct path *path, struct kstat *stat,
                  u32 request_mask, unsigned int flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct super_block *sb = path->dentry->d_sb;
        struct ocfs2_super *osb = sb->s_fs_info;
        int err;

        err = ocfs2_inode_revalidate(path->dentry);
        if (err) {
                if (err != -ENOENT)
                        mlog_errno(err);
                goto bail;
        }

        generic_fillattr(inode, stat);
        /*
         * If there is inline data in the inode, the inode will normally not
         * have data blocks allocated (it may have an external xattr block).
         * Report at least one sector for such files, so tools like tar, rsync,
         * and others don't incorrectly think the file is completely sparse.
         */
        if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
                stat->blocks += (stat->size + 511) >> 9;

        /* We set the blksize from the cluster size for performance */
        stat->blksize = osb->s_clustersize;

bail:
        return err;
}

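/*
 * ->permission() takes the cluster lock so mode and ACL data are
 * current across nodes.  Cluster locking can block, so the RCU-walk
 * case (MAY_NOT_BLOCK) is bounced back to the VFS with -ECHILD to
 * retry in ref-walk mode.
 */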
int ocfs2_permission(struct inode *inode, int mask)
{
        int ret, had_lock;
        struct ocfs2_lock_holder oh;

        if (mask & MAY_NOT_BLOCK)
                return -ECHILD;

        had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
        if (had_lock < 0) {
                ret = had_lock;
                goto out;
        } else if (had_lock) {
                /* See comments in ocfs2_setattr() for details.
                 * The call chain of this case could be:
                 * do_sys_open()
                 *  may_open()
                 *   inode_permission()
                 *    ocfs2_permission()
                 *     ocfs2_iop_get_acl()
                 */
                mlog(ML_ERROR, "Another case of recursive locking:\n");
                dump_stack();
        }

        ret = generic_permission(inode, mask);

        ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
        return ret;
}

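/*
 * Clear S_ISUID (and S_ISGID when group-execute is set) in a journaled
 * transaction.  This mirrors the usual VFS suid-dropping rules on
 * write, but updates the on-disk dinode mode as well.
 */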
static int __ocfs2_write_remove_suid(struct inode *inode,
                                     struct buffer_head *bh)
{
        int ret;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;

        trace_ocfs2_write_remove_suid(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        inode->i_mode);

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_trans;
        }

        inode->i_mode &= ~S_ISUID;
        if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
                inode->i_mode &= ~S_ISGID;

        di = (struct ocfs2_dinode *) bh->b_data;
        di->i_mode = cpu_to_le16(inode->i_mode);
        ocfs2_update_inode_fsync_trans(handle, inode, 0);

        ocfs2_journal_dirty(handle, bh);

out_trans:
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
        int ret;
        struct buffer_head *bh = NULL;

        ret = ocfs2_read_inode_block(inode, &bh);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = __ocfs2_write_remove_suid(inode, bh);
out:
        brelse(bh);
        return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes.  Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
                                            u64 start, u64 len)
{
        int ret;
        u32 cpos, phys_cpos, clusters, alloc_size;
        u64 end = start + len;
        struct buffer_head *di_bh = NULL;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_read_inode_block(inode, &di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Nothing to do if the requested reservation range
                 * fits within the inode.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, end))
                        goto out;

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * We consider both start and len to be inclusive.
         */
        cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
        clusters -= cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
                                         &alloc_size, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Hole or existing extent len can be arbitrary, so
                 * cap it to our own allocation request.
                 */
                if (alloc_size > clusters)
                        alloc_size = clusters;

                if (phys_cpos) {
                        /*
                         * We already have an allocation at this
                         * region so we can safely skip it.
                         */
                        goto next;
                }

                ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
                if (ret) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }

next:
                cpos += alloc_size;
                clusters -= alloc_size;
        }

        ret = 0;
out:
        brelse(di_bh);
        return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters.  This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
                                         u64 byte_len)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t start, end;
        struct address_space *mapping = inode->i_mapping;

        start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
        end = byte_start + byte_len;
        end = end & ~(osb->s_clustersize - 1);

        if (start < end) {
                unmap_mapping_range(mapping, start, end - start, 0);
                truncate_inode_pages_range(mapping, start, end - 1);
        }
}

/*
 * Zero out partial blocks of one cluster.
 *
 * start: file offset where zeroing starts; it will be rounded up to
 * the next block boundary.
 * len: trimmed back to the end of the current cluster if "start + len"
 * extends past it.
 */
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
                                         u64 start, u64 len)
{
        int ret;
        u64 start_block, end_block, nr_blocks;
        u64 p_block, offset;
        u32 cluster, p_cluster, nr_clusters;
        struct super_block *sb = inode->i_sb;
        u64 end = ocfs2_align_bytes_to_clusters(sb, start);

        if (start + len < end)
                end = start + len;

        start_block = ocfs2_blocks_for_bytes(sb, start);
        end_block = ocfs2_blocks_for_bytes(sb, end);
        nr_blocks = end_block - start_block;
        if (!nr_blocks)
                return 0;

        cluster = ocfs2_bytes_to_clusters(sb, start);
        ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
                                 &nr_clusters, NULL);
        if (ret)
                return ret;
        if (!p_cluster)
                return 0;

        offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
        p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
        return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}

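/*
 * Zero the partially-covered clusters at either edge of a range being
 * deallocated, so that punching a hole or truncating never exposes
 * stale data from the parts of those clusters that remain.
 */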
static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend = 0;
	u64 end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;
	loff_t isize = i_size_read(inode);

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	/* No page cache for EOF blocks, issue zero out to disk. */
	if (end > isize) {
		/*
		 * Zero out the EOF blocks in the last cluster starting
		 * from "isize", even when "start" > "isize". Zeroing
		 * exactly at "start" would be complicated because
		 * "start" may not be block aligned; that would require
		 * a buffer write, and buffered writes past EOF are not
		 * supported.
		 */
		ret = ocfs2_zeroout_partial_cluster(inode, isize,
						    end - isize);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		if (start >= isize)
			goto out;
		end = isize;
	}

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * If start is on a cluster boundary and end is somewhere in another
	 * cluster, we have not COWed the cluster starting at start, unless
	 * end is also within the same cluster. So, in this case, we skip
	 * this first call to ocfs2_zero_range_for_truncate() and move on
	 * to the next one.
	 */
	if ((start & (csize - 1)) != 0) {
		/*
		 * We want to get the byte offset of the end of the 1st
		 * cluster.
		 */
		tmpend = (u64)osb->s_clustersize +
			(start & ~(osb->s_clustersize - 1));
		if (tmpend > end)
			tmpend = end;

		trace_ocfs2_zero_partial_clusters_range1(
			(unsigned long long)start,
			(unsigned long long)tmpend);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
						    tmpend);
		if (ret)
			mlog_errno(ret);
	}

	if (tmpend < end) {
		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);

		trace_ocfs2_zero_partial_clusters_range2(
			(unsigned long long)start, (unsigned long long)end);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}
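
/*
 * Return the index of the last extent record whose e_cpos is below
 * "pos", or -1 if every record starts at or beyond "pos".
 */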
static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}

/*
 * Helper to calculate the punching pos and length in one run; we handle
 * the following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		/*
		 * skip hole if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * There are two possibilities here:
		 *
		 * - the last record has been removed
		 * - trunc_start was within a hole
		 *
		 * Either case means hole punching is complete.
		 */
		ret = 1;
	}

	*done = ret;
}
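
/*
 * Punch a hole in the byte range [byte_start, byte_start + byte_len):
 * zero the partial clusters at either edge, then walk the extent tree
 * right to left, removing the extent records that fall in the range.
 */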
int ocfs2_remove_inode_range(struct inode *inode,
			     struct buffer_head *di_bh, u64 byte_start,
			     u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW two clusters which might be
	 * partially zeroed later, if the hole's start and end offsets
	 * fall within one cluster (i.e. are not exactly aligned to the
	 * cluster size).
	 */
	if (ocfs2_is_refcount_inode(inode)) {
		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {
		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for previous extent block is
			 * always one cluster less than actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc, false);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && should_remove_suid(file->f_path.dentry)) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}

	orig_isize = i_size_read(inode);
	/* zeroout eof blocks in the cluster. */
	if (!ret && change_size && orig_isize < size) {
		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
					size - orig_isize);
		if (!ret)
			i_size_write(inode, size);
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (file && (file->f_flags & O_SYNC))
		handle->h_sync = 1;

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	inode_unlock(inode);
	return ret;
}
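
/*
 * ioctl entry point for OCFS2_IOC_RESVSP/UNRESVSP: validate the command
 * against the filesystem features and the file mode, then hand off to
 * __ocfs2_change_file_space().
 */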
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret;

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
	mnt_drop_write_file(file);
	return ret;
}
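
/*
 * fallocate() entry point: maps FALLOC_FL_KEEP_SIZE and
 * FALLOC_FL_PUNCH_HOLE onto the RESVSP/UNRESVSP machinery above.
 */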
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;
	int cmd = OCFS2_IOC_RESVSP64;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE)
		change_size = 0;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = OCFS2_IOC_UNRESVSP64;

	sr.l_whence = 0;
	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
					 change_size);
}
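
/*
 * Walk the extents covering [pos, pos + count) and return 1 if any of
 * them is refcounted (shared), 0 if none are, or a negative error.
 */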
int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !ocfs2_is_refcount_inode(inode) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}
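
/*
 * Return 1 if either end of the I/O range is not aligned to the
 * filesystem block size, 0 otherwise.
 */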
static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
	int blockmask = inode->i_sb->s_blocksize - 1;
	loff_t final_size = pos + count;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;
	return 0;
}
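
/*
 * Take the cluster inode lock at "meta_level" and ip_alloc_sem (read or
 * write, depending on "write_sem"). When "wait" is zero, the trylock
 * variants are used and -EAGAIN is returned rather than blocking.
 */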
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
					    struct buffer_head **di_bh,
					    int meta_level,
					    int write_sem,
					    int wait)
{
	int ret = 0;

	if (wait)
		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
	else
		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
	if (ret < 0)
		goto out;

	if (wait) {
		if (write_sem)
			down_write(&OCFS2_I(inode)->ip_alloc_sem);
		else
			down_read(&OCFS2_I(inode)->ip_alloc_sem);
	} else {
		if (write_sem)
			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
		else
			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);

		if (!ret) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	return ret;

out_unlock:
	brelse(*di_bh);
	*di_bh = NULL;
	ocfs2_inode_unlock(inode, meta_level);
out:
	return ret;
}

static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
					       struct buffer_head **di_bh,
					       int meta_level,
					       int write_sem)
{
	if (write_sem)
		up_write(&OCFS2_I(inode)->ip_alloc_sem);
	else
		up_read(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(*di_bh);
	*di_bh = NULL;

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);
}
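
/*
 * Called from the write path to sort out cluster locking, suid/sgid
 * clearing and CoW of refcounted extents before the write proceeds.
 * With "wait" cleared (the IOCB_NOWAIT case), any lock that cannot be
 * taken immediately fails the write with -EAGAIN.
 */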
static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t pos, size_t count, int wait)
{
	int ret = 0, meta_level = 0, overwrite_io = 0;
	int write_sem = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	struct buffer_head *di_bh = NULL;
	u32 cpos;
	u32 clusters;

	/*
	 * We start with a read level meta lock and only jump to an ex
	 * if we need to make modifications here.
	 */
	for(;;) {
		ret = ocfs2_inode_lock_for_extent_tree(inode,
						       &di_bh,
						       meta_level,
						       write_sem,
						       wait);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		/*
		 * Check if IO will overwrite allocated blocks in case
		 * IOCB_NOWAIT flag is set.
		 */
		if (!wait && !overwrite_io) {
			overwrite_io = 1;

			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out_unlock;
			}
		}

		/* Clear suid / sgid if necessary. We do this here
		 * instead of later in the write path because
		 * remove_suid() calls ->setattr without any hint that
		 * we may have already done our cluster locking. Since
		 * ocfs2_setattr() *must* take cluster locks to
		 * proceed, this will lead us to recursively lock the
		 * inode. There's also the dinode i_size state which
		 * can be lost via setattr during extending writes (we
		 * set inode->i_size at the end of a write). */
		if (should_remove_suid(dentry)) {
			if (meta_level == 0) {
				ocfs2_inode_unlock_for_extent_tree(inode,
								   &di_bh,
								   meta_level,
								   write_sem);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

		ret = ocfs2_check_range_for_refcount(inode, pos, count);
		if (ret == 1) {
			ocfs2_inode_unlock_for_extent_tree(inode,
							   &di_bh,
							   meta_level,
							   write_sem);
			meta_level = 1;
			write_sem = 1;
			ret = ocfs2_inode_lock_for_extent_tree(inode,
							       &di_bh,
							       meta_level,
							       write_sem,
							       wait);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out;
			}

			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
			clusters =
				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
		}

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out_unlock;
		}

		break;
	}

out_unlock:
	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    pos, count, wait);

	ocfs2_inode_unlock_for_extent_tree(inode,
					   &di_bh,
					   meta_level,
					   write_sem);

out:
	return ret;
}
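
/*
 * ->write_iter: takes i_rwsem and the cluster rw lock (shared only for
 * non-appending O_DIRECT writes under "coherency=buffered"), prepares
 * the inode via ocfs2_prepare_inode_for_write(), then hands off to
 * __generic_file_write_iter().
 */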
static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	int rw_level;
	ssize_t written = 0;
	ssize_t ret;
	size_t count = iov_iter_count(from);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);
	void *saved_ki_complete = NULL;
	int append_write = ((iocb->ki_pos + count) >=
			i_size_read(inode) ? 1 : 0);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		file->f_path.dentry->d_name.len,
		file->f_path.dentry->d_name.name,
		(unsigned int)from->nr_segs);	/* GRRRRR */

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	if (count == 0)
		return 0;

	if (nowait) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else
		inode_lock(inode);

	/*
	 * Concurrent O_DIRECT writes are allowed with
	 * mount_option "coherency=buffered".
	 * For append write, we must take rw EX.
	 */
	rw_level = (!direct_io || full_coherency || append_write);

	if (nowait)
		ret = ocfs2_try_rw_lock(inode, rw_level);
	else
		ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out_mutex;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches. Buffered I/O
		 * already does this in write_begin().
		 */
		if (nowait)
			ret = ocfs2_try_inode_lock(inode, NULL, 1);
		else
			ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}
		ocfs2_inode_unlock(inode, 1);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0) {
		if (ret)
			mlog_errno(ret);
		goto out;
	}
	count = ret;

	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out;
	}

	if (direct_io && !is_sync_kiocb(iocb) &&
	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
		/*
		 * Make it a sync io if it's an unaligned aio.
		 */
		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
	}

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	written = __generic_file_write_iter(iocb, from);
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(written == -EIOCBQUEUED && !direct_io);

	/*
	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
	 * function pointer which is called when o_direct io completes so that
	 * it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and others
	 * that don't. So we don't have to unlock the rw_lock if either an
	 * async dio is going to do it in the future or an end_io after an
	 * error has already done it.
	 */
	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
	}

	if (unlikely(written <= 0))
		goto out;

	if (((file->f_flags & O_DSYNC) && !direct_io) ||
	    IS_SYNC(inode)) {
		ret = filemap_fdatawrite_range(file->f_mapping,
					       iocb->ki_pos - written,
					       iocb->ki_pos - 1);
		if (ret < 0)
			written = ret;

		if (!ret) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping,
						      iocb->ki_pos - written,
						      iocb->ki_pos - 1);
	}

out:
	if (saved_ki_complete)
		xchg(&iocb->ki_complete, saved_ki_complete);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_mutex:
	inode_unlock(inode);

	if (written)
		ret = written;
	return ret;
}
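
/*
 * ->read_iter: buffered reads protect themselves in ->readpage(), so
 * only O_DIRECT reads take the shared cluster rw lock here.
 */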
static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
				    struct iov_iter *to)
{
	int ret = 0, rw_level = -1, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = file_inode(filp);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			filp->f_path.dentry->d_name.len,
			filp->f_path.dentry->d_name.name,
			to->nr_segs);	/* GRRRRR */

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	/*
	 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (direct_io) {
		if (nowait)
			ret = ocfs2_try_rw_lock(inode, 0);
		else
			ret = ocfs2_rw_lock(inode, 0);

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This allows the checks down below
	 * generic_file_read_iter() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
				     !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_read_iter(iocb, to);
	trace_generic_file_read_iter_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !direct_io);

	/* see ocfs2_file_write_iter */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
	}

bail:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}

/* See generic_file_llseek_unlocked() */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret = 0;

	inode_lock(inode);

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_END:
		/* SEEK_END requires the OCFS2 inode lock for the file
		 * because it references the file's size.
		 */
		ret = ocfs2_inode_lock(inode, NULL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		offset += i_size_read(inode);
		ocfs2_inode_unlock(inode, 0);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
	case SEEK_HOLE:
		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
		if (ret)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	if (ret)
		return ret;
	return offset;
}
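
/*
 * ->remap_file_range: reflink a range of blocks from file_in into
 * file_out, holding both inodes locked against I/O for the duration.
 */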
static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
				     struct file *file_out, loff_t pos_out,
				     loff_t len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
	struct buffer_head *in_bh = NULL, *out_bh = NULL;
	bool same_inode = (inode_in == inode_out);
	loff_t remapped = 0;
	ssize_t ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;
	if (!ocfs2_refcount_tree(osb))
		return -EOPNOTSUPP;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* Lock both files against IO */
	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	/* Lock out changes to the allocation maps and remap. */
	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
				  SINGLE_DEPTH_NESTING);

	/* Zap any page cache for the destination file's range. */
	truncate_inode_pages_range(&inode_out->i_data,
				   round_down(pos_out, PAGE_SIZE),
				   round_up(pos_out + len, PAGE_SIZE) - 1);

	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
			inode_out, out_bh, pos_out, len);
	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
	if (remapped < 0) {
		ret = remapped;
		mlog_errno(ret);
		goto out_unlock;
	}

	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode_in, 0);
	ocfs2_extent_map_trunc(inode_out, 0);

	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

out_unlock:
	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
	return remapped > 0 ? remapped : ret;
}

const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.listxattr	= ocfs2_listxattr,
	.fiemap		= ocfs2_fiemap,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};