// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of operations over global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/llist.h>
#include <linux/iversion.h>

#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "ocfs2_trace.h"

/*
 * Locking of quotas with OCFS2 is rather complex. Here are rules that
 * should be obeyed by all the functions:
 * - any write of quota structure (either to local or global file) is protected
 *   by dqio_sem or dquot->dq_lock.
 * - any modification of global quota file holds inode cluster lock, i_mutex,
 *   and ip_alloc_sem of the global quota file (achieved by
 *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
 * - an allocation of new blocks for local quota file is protected by
 *   its ip_alloc_sem
 *
 * A rough sketch of locking dependencies (lf = local file, gf = global file):
 * Normal filesystem operation:
 *   start_trans -> dqio_sem -> write to lf
 * Syncing of local and global file:
 *   ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
 *     write to gf
 *     -> write to lf
 * Acquire dquot for the first time:
 *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
 *     -> alloc space for gf
 *     -> start_trans -> qinfo_lock -> write to gf
 *     -> ip_alloc_sem of lf -> alloc space for lf
 *     -> write to lf
 * Release last reference to dquot:
 *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
 *     -> write to lf
 * Note that all the above operations also hold the inode cluster lock of lf.
 * Recovery:
 *   inode cluster lock of recovered lf
 *     -> read bitmaps -> ip_alloc_sem of lf
 *     -> ocfs2_lock_global_qf -> start_trans -> dqio_sem -> qinfo_lock ->
 *        write to gf
 */
static void qsync_work_fn(struct work_struct *work);

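/*
 * Copy the on-disk global dquot entry into the in-memory dquot. Fields the
 * admin has set locally (marked by the DQ_LASTSET_B flags) are preserved.
 */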
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

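/* Fill the on-disk global dquot entry from the in-memory dquot. */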
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(from_kqid(&init_user_ns, dquot->dq_id));
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_pad1 = d->dqb_pad2 = 0;
}

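/*
 * Check whether the on-disk entry at dp is in use and belongs to the id of
 * the given dquot.
 */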
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;

	return qid_eq(make_kqid(&init_user_ns, dquot->dq_id.type,
				le32_to_cpu(d->dqb_id)),
		      dquot->dq_id);
}

const struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};

int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

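/*
 * Read one quota block given its physical block number and verify it via
 * ocfs2_validate_quota_block().
 */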
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
				struct buffer_head **bhp)
{
	int rc;

	*bhp = NULL;
	rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
			       ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);
	return rc;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;
	u64 pblock = 0, pcount = 0;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		if (!pcount) {
			err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
							  &pcount, NULL);
			if (err) {
				mlog_errno(err);
				return err;
			}
		} else {
			pcount--;
			pblock++;
		}
		bh = NULL;
		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();
	u64 pblock, pcount;

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	if (i_size_read(gqinode) < off + len) {
		loff_t rounded_end =
			ocfs2_align_bytes_to_blocks(sb, off + len);

		/* Space is already allocated in ocfs2_acquire_dquot() */
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       rounded_end);
		if (err < 0)
			goto out;
		new = 1;
	}
	err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
	if (err) {
		mlog_errno(err);
		goto out;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		bh = sb_getblk(sb, pblock);
		if (!bh)
			err = -ENOMEM;
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
	err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
				      ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	ocfs2_journal_dirty(handle, bh);
	brelse(bh);
out:
	if (err) {
		mlog_errno(err);
		return err;
	}
	inode_inc_iversion(gqinode);
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	return len;
}

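/*
 * Take the cluster lock on the global quota file inode and cache its buffer
 * head in dqi_gqi_bh. For an exclusive lock we also take the inode lock and
 * ip_alloc_sem for writing, otherwise just ip_alloc_sem for reading.
 */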
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	if (ex) {
		inode_lock(oinfo->dqi_gqinode);
		down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	} else {
		down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	}
	return 0;
}

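/* Drop the locks taken by ocfs2_lock_global_qf() and release the cached bh. */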
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	if (ex) {
		up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
		inode_unlock(oinfo->dqi_gqinode);
	} else {
		up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	}
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					      GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	u64 pcount;
	int status;

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
			OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
			type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}

	status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
					     &pcount, NULL);
	if (status < 0)
		goto out_unlock;

	status = ocfs2_qinfo_lock(oinfo, 0);
	if (status < 0)
		goto out_unlock;
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_qinfo_unlock(oinfo, 0);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}

	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
					OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
	return status;
out_unlock:
	ocfs2_unlock_global_qf(oinfo, 0);
	mlog_errno(status);
	goto out_err;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}

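/*
 * Locked wrapper around __ocfs2_global_write_info(): takes dqio_sem and the
 * cluster qinfo lock before writing the info block.
 */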
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv;

	down_write(&dqopt->dqio_sem);
	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		goto out_sem;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
out_sem:
	up_write(&dqopt->dqio_sem);
	return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We may need to allocate tree blocks and a leaf block but not the
	 * root block
	 */
	return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
	/* We modify all the allocated blocks, tree root, info block and
	 * the inode */
	return (ocfs2_global_qinit_alloc(sb, type) + 2) *
			OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_id.type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time64_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}

	/* Update space and inode usage. Get also other information from
	 * global quota file so that we don't overwrite any changes there. */
	spin_lock(&dquot->dq_dqb_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	trace_ocfs2_sync_dquot(from_kqid(&init_user_ns, dquot->dq_id),
			       dquot->dq_dqb.dqb_curspace,
			       (long long)spacechange,
			       dquot->dq_dqb.dqb_curinodes,
			       (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dquot->dq_dqb_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_id.type,
			       (unsigned)from_kqid(&init_user_ns, dquot->dq_id));
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 * Functions for periodic syncing of dquots with global file
 */
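/*
 * Sync a single active dquot with the global quota file and rewrite its
 * local entry; used as the callback for dquot_scan_active() below.
 */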
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id),
				      dquot->dq_id.type,
				      type, sb->s_id);
	if (type != dquot->dq_id.type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	down_write(&sb_dqopt(sb)->dqio_sem);
	status = ocfs2_sync_dquot(dquot);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	status = ocfs2_local_write_dquot(dquot);
	if (status < 0)
		mlog_errno(status);
	up_write(&sb_dqopt(sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}

static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	/*
	 * We have to be careful here not to deadlock on s_umount as umount
	 * disabling quotas may be in progress and it waits for this work to
	 * complete. If trylock fails, we'll do the sync next time...
	 */
	if (down_read_trylock(&sb->s_umount)) {
		dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
		up_read(&sb->s_umount);
	}
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */
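/*
 * Write the in-memory dquot to the node-local quota file within its own
 * transaction.
 */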
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id),
				dquot->dq_id.type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
	status = ocfs2_local_write_dquot(dquot);
	up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	/*
	 * We modify tree, leaf block, global info, local chunk header,
	 * global and local inode; OCFS2_QINFO_WRITE_CREDITS already
	 * accounts for inode update
	 */
	return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
	       OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
	       OCFS2_QINFO_WRITE_CREDITS +
	       OCFS2_INODE_UPDATE_CREDITS;
}

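/*
 * Work function dropping the dquot references queued up by
 * ocfs2_release_dquot() when it runs from the downconvert thread.
 */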
void ocfs2_drop_dquot_refs(struct work_struct *work)
{
	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
					       dquot_drop_work);
	struct llist_node *list;
	struct ocfs2_dquot *odquot, *next_odquot;

	list = llist_del_all(&osb->dquot_drop_list);
	llist_for_each_entry_safe(odquot, next_odquot, list, list) {
		/* Drop the reference we acquired in ocfs2_dquot_release() */
		dqput(&odquot->dq_dquot);
	}
}

/*
 * Called when the last reference to dquot is dropped. If we are called from
 * downconvert thread, we cannot do all the handling here because grabbing
 * quota lock could deadlock (the node holding the quota lock could need some
 * other cluster lock to proceed but with blocked downconvert thread we cannot
 * release any lock).
 */
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	trace_ocfs2_release_dquot(from_kqid(&init_user_ns, dquot->dq_id),
				  dquot->dq_id.type);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (dquot_is_busy(dquot))
		goto out;
	/* Running from downconvert thread? Postpone quota processing to wq */
	if (current == osb->dc_task) {
		/*
		 * Grab our own reference to dquot and queue it for delayed
		 * dropping. Quota code rechecks after calling
		 * ->release_dquot() and won't free dquot structure.
		 */
		dqgrab(dquot);
		/* First entry on list -> queue work */
		if (llist_add(&OCFS2_DQUOT(dquot)->list, &osb->dquot_drop_list))
			queue_work(osb->ocfs2_wq, &osb->dquot_drop_work);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}

	status = ocfs2_global_release_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	status = ocfs2_local_release_dquot(handle, dquot);
	/*
	 * If we fail here, we cannot do much as global structure is
	 * already released. So just complain...
	 */
	if (status < 0)
		mlog_errno(status);

	/*
	 * Clear dq_off so that we search for the structure in quota file next
	 * time we acquire it. The structure might be deleted and reallocated
	 * elsewhere by another node while our dquot structure is on freelist.
	 */
	dquot->dq_off = 0;
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mutex_unlock(&dquot->dq_lock);
	if (status)
		mlog_errno(status);
	return status;
}

/*
 * Read global dquot structure from disk or create it if it does
 * not exist. Also update use count of the global structure and
 * create structure in node-local quota file.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	int status = 0, err;
	int ex = 0;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int type = dquot->dq_id.type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = info->dqi_gqinode;
	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
	handle_t *handle;

	trace_ocfs2_acquire_dquot(from_kqid(&init_user_ns, dquot->dq_id),
				  type);
	mutex_lock(&dquot->dq_lock);
	/*
	 * We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure
	 */
	status = ocfs2_lock_global_qf(info, 1);
	if (status < 0)
		goto out;
	status = ocfs2_qinfo_lock(info, 0);
	if (status < 0)
		goto out_dq;
	/*
	 * We always want to read dquot structure from disk because we don't
	 * know what happened with it while it was on freelist.
	 */
	status = qtree_read_dquot(&info->dqi_gi, dquot);
	ocfs2_qinfo_unlock(info, 0);
	if (status < 0)
		goto out_dq;

	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		ex = 1;
		/*
		 * Add blocks to quota file before we start a transaction since
		 * locking allocators ranks above a transaction start
		 */
		WARN_ON(journal_current_handle());
		status = ocfs2_extend_no_holes(gqinode, NULL,
			i_size_read(gqinode) + (need_alloc << sb->s_blocksize_bits),
			i_size_read(gqinode));
		if (status < 0)
			goto out_dq;
	}

	handle = ocfs2_start_trans(osb,
				   ocfs2_calc_global_qinit_credits(sb, type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		goto out_dq;
	}
	status = ocfs2_qinfo_lock(info, ex);
	if (status < 0)
		goto out_trans;
	status = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(sb, type))) {
		err = __ocfs2_global_write_info(sb, type);
		if (!status)
			status = err;
	}
	ocfs2_qinfo_unlock(info, ex);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_dq:
	ocfs2_unlock_global_qf(info, 1);
	if (status < 0)
		goto out;

	status = ocfs2_create_local_dquot(dquot);
	if (status < 0)
		goto out;
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
	mutex_unlock(&dquot->dq_lock);
	if (status)
		mlog_errno(status);
	return status;
}

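/*
 * Return the next allocated quota id at or after *qid by scanning the global
 * quota tree; backs the Q_GETNEXTQUOTA interface.
 */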
static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
{
	int type = qid->type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	int status = 0;

	trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
	if (!sb_has_quota_loaded(sb, type)) {
		status = -ESRCH;
		goto out;
	}
	status = ocfs2_lock_global_qf(info, 0);
	if (status < 0)
		goto out;
	status = ocfs2_qinfo_lock(info, 0);
	if (status < 0)
		goto out_global;
	status = qtree_get_next_id(&info->dqi_gi, qid);
	ocfs2_qinfo_unlock(info, 0);
out_global:
	ocfs2_unlock_global_qf(info, 0);
out:
	/*
	 * Avoid logging ENOENT since it just means there isn't next ID and
	 * ESRCH which means quota isn't enabled for the filesystem.
	 */
	if (status && status != -ENOENT && status != -ESRCH)
		mlog_errno(status);
	return status;
}

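/*
 * Write a dirtied dquot to the local quota file; when some fields were
 * explicitly set by the admin, sync it to the global quota file right away
 * so the change propagates to other nodes.
 */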
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_id.type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id),
				     type);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dquot->dq_dqb_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dquot->dq_dqb_lock);
	/* This is a slight hack but we can't afford getting global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	down_write(&sb_dqopt(sb)->dqio_sem);
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_dlock;
	}
	/* Now write updated local dquot structure */
	status = ocfs2_local_write_dquot(dquot);
out_dlock:
	up_write(&sb_dqopt(sb)->dqio_sem);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	if (status)
		mlog_errno(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	if (status)
		mlog_errno(status);
	return status;
}

static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

const struct dquot_operations ocfs2_quota_operations = {
	/* We never make dquot dirty so .write_dquot is never called */
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
	.get_next_id	= ocfs2_get_next_id,
};