xfs_trans_dquot.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        struct xfs_trans *tp,
        struct xfs_dquot *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(dqp->q_logitem.qli_dquot == dqp);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed. The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
        struct xfs_trans *tp,
        struct xfs_dquot *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /* Upgrade the dquot to bigtime format if possible. */
        if (dqp->q_id != 0 &&
            xfs_sb_version_hasbigtime(&tp->t_mountp->m_sb) &&
            !(dqp->q_type & XFS_DQTYPE_BIGTIME))
                dqp->q_type |= XFS_DQTYPE_BIGTIME;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}
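
/*
 * Illustrative pairing of the two helpers above (a minimal sketch modelled
 * loosely on the direct xfs_qm_scall_setqlim path mentioned in the comment;
 * the real caller has more steps):
 *
 *      xfs_dqlock(dqp);
 *      xfs_trans_dqjoin(tp, dqp);
 *      ... update limits or counters on dqp ...
 *      xfs_trans_log_dquot(tp, dqp);
 *      error = xfs_trans_commit(tp);
 */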

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
        struct xfs_trans *otp,
        struct xfs_trans *ntp)
{
        struct xfs_dqtrx *oq, *nq;
        int i, j;
        struct xfs_dqtrx *oqa, *nqa;
        uint64_t blk_res_used;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);

        /*
         * Because the quota blk reservation is carried forward,
         * it is also necessary to carry forward the DQ_DIRTY flag.
         */
        if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
                ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                oqa = otp->t_dqinfo->dqs[j];
                nqa = ntp->t_dqinfo->dqs[j];
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        blk_res_used = 0;

                        if (oqa[i].qt_dquot == NULL)
                                break;
                        oq = &oqa[i];
                        nq = &nqa[i];

                        if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
                                blk_res_used = oq->qt_bcount_delta;

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
                        oq->qt_blk_res = blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;
                }
        }
}
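
/*
 * Worked example of the carry-forward above (numbers are illustrative):
 * if the old transaction reserved qt_blk_res = 10 blocks and has used
 * qt_bcount_delta = 3 of them so far, then blk_res_used = 3, the old
 * dqtrx keeps a reservation of exactly the 3 blocks it consumed, and the
 * new transaction inherits the remaining 7 blocks of reservation.
 */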

/*
 * Wrap around mod_dquot to account for the user, group and project quotas
 * attached to an inode.
 */
void
xfs_trans_mod_dquot_byino(
        xfs_trans_t *tp,
        xfs_inode_t *ip,
        uint field,
        int64_t delta)
{
        xfs_mount_t *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) ||
            !XFS_IS_QUOTA_ON(mp) ||
            xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
        if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
        struct xfs_trans *tp,
        struct xfs_dquot *dqp)
{
        int i;
        struct xfs_dqtrx *qa;

        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_USER:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
                break;
        case XFS_DQTYPE_GROUP:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
                break;
        case XFS_DQTYPE_PROJ:
                qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
                break;
        default:
                return NULL;
        }

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp)
                        return &qa[i];
        }

        return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        struct xfs_trans *tp,
        struct xfs_dquot *dqp,
        uint field,
        int64_t delta)
{
        struct xfs_dqtrx *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
        qtrx = NULL;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

        if (delta) {
                trace_xfs_trans_mod_dquot_before(qtrx);
                trace_xfs_trans_mod_dquot(tp, dqp, field, delta);
        }

        switch (field) {
        /* regular disk blk reservation */
        case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += delta;
                break;

        /* inode reservation */
        case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += delta;
                break;

        /* disk blocks used. */
        case XFS_TRANS_DQ_BCOUNT:
                qtrx->qt_bcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

        /* Inode Count */
        case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

        /* rtblk reservation */
        case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += delta;
                break;

        /* rtblk count */
        case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

        case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

        default:
                ASSERT(0);
        }

        if (delta)
                trace_xfs_trans_mod_dquot_after(qtrx);

        tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}
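
/*
 * Illustrative flow through xfs_trans_mod_dquot() (assuming a typical block
 * allocation; the numbers are made up): a transaction that reserves 8 blocks
 * against a dquot and later maps 5 of them ends up here twice,
 *
 *      xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);   qt_blk_res = 8
 *      xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 5);     qt_bcount_delta = 5
 *
 * and the unused 3 blocks of reservation are handed back when
 * xfs_trans_apply_dquot_deltas() runs at commit time.
 */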

/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified. We know that the
 * highest number of dquots of one type - usr, grp and prj - involved in a
 * transaction is 3 so we don't need to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        struct xfs_trans *tp,
        struct xfs_dqtrx *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
        struct xfs_dquot_res *res,
        uint64_t reserved,
        int64_t res_used,
        int64_t count_delta)
{
        if (reserved != 0) {
                /*
                 * Subtle math here: If reserved > res_used (the normal case),
                 * we're simply subtracting the unused transaction quota
                 * reservation from the dquot reservation.
                 *
                 * If, however, res_used > reserved, then we have allocated
                 * more quota blocks than were reserved for the transaction.
                 * We must add that excess to the dquot reservation since it
                 * tracks (usage + resv) and by definition we didn't reserve
                 * that excess.
                 */
                res->reserved -= abs(reserved - res_used);
        } else if (count_delta != 0) {
                /*
                 * These blks were never reserved, either inside a transaction
                 * or outside one (in a delayed allocation). Also, this isn't
                 * always a negative number since we sometimes deliberately
                 * skip quota reservations.
                 */
                res->reserved += count_delta;
        }
}
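
/*
 * Worked example for the helper above (numbers are illustrative): if a
 * transaction reserved 8 blocks (reserved = 8) but only used 5 of them
 * (res_used = 5), res->reserved drops by 3, returning the unused part of
 * the transaction reservation to the dquot. When there was no transaction
 * reservation at all (reserved == 0), count_delta is applied directly so
 * that res->reserved keeps tracking usage + reservation.
 */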

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        struct xfs_trans *tp)
{
        int i, j;
        struct xfs_dquot *dqp;
        struct xfs_dqtrx *qtrx, *qa;
        int64_t totalbdelta;
        int64_t totalrtbdelta;

        if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        ASSERT(tp->t_dqinfo);
        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];
                if (qa[0].qt_dquot == NULL)
                        continue;

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        uint64_t blk_res_used;

                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));

                        /*
                         * adjust the actual number of blocks used
                         */

                        /*
                         * The issue here is - sometimes we don't make a blkquota
                         * reservation intentionally to be fair to users
                         * (when the amount is small). On the other hand,
                         * delayed allocs do make reservations, but that's
                         * outside of a transaction, so we have no
                         * idea how much was really reserved.
                         * So, here we've accumulated delayed allocation blks and
                         * non-delay blks. The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;

                        if (totalbdelta != 0 || totalrtbdelta != 0 ||
                            qtrx->qt_icount_delta != 0) {
                                trace_xfs_trans_apply_dquot_deltas_before(dqp);
                                trace_xfs_trans_apply_dquot_deltas(qtrx);
                        }

#ifdef DEBUG
                        if (totalbdelta < 0)
                                ASSERT(dqp->q_blk.count >= -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                dqp->q_blk.count += totalbdelta;

                        if (qtrx->qt_icount_delta)
                                dqp->q_ino.count += qtrx->qt_icount_delta;

                        if (totalrtbdelta)
                                dqp->q_rtb.count += totalrtbdelta;

                        if (totalbdelta != 0 || totalrtbdelta != 0 ||
                            qtrx->qt_icount_delta != 0)
                                trace_xfs_trans_apply_dquot_deltas_after(dqp);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (dqp->q_id) {
                                xfs_qm_adjust_dqlimits(dqp);
                                xfs_qm_adjust_dqtimers(dqp);
                        }

                        dqp->q_flags |= XFS_DQFLAG_DIRTY;
                        /*
                         * add this to the list of items to get logged
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
                        xfs_apply_quota_reservation_deltas(&dqp->q_blk,
                                        qtrx->qt_blk_res, blk_res_used,
                                        qtrx->qt_bcount_delta);

                        /*
                         * Adjust the RT reservation.
                         */
                        xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
                                        qtrx->qt_rtblk_res,
                                        qtrx->qt_rtblk_res_used,
                                        qtrx->qt_rtbcount_delta);

                        /*
                         * Adjust the inode reservation.
                         */
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                        xfs_apply_quota_reservation_deltas(&dqp->q_ino,
                                        qtrx->qt_ino_res,
                                        qtrx->qt_ino_res_used,
                                        qtrx->qt_icount_delta);

                        ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
                        ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
                        ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
                }
        }
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (ie. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        struct xfs_trans *tp)
{
        int i, j;
        struct xfs_dquot *dqp;
        struct xfs_dqtrx *qtrx, *qa;
        bool locked;

        if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
                qa = tp->t_dqinfo->dqs[j];

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;
                        /*
                         * Unreserve the original reservation. We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
                        locked = false;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = true;
                                dqp->q_blk.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_ino.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = true;
                                }
                                dqp->q_rtb.reserved -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);
                }
        }
}

STATIC void
xfs_quota_warn(
        struct xfs_mount *mp,
        struct xfs_dquot *dqp,
        int type)
{
        enum quota_type qtype;

        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_PROJ:
                qtype = PRJQUOTA;
                break;
        case XFS_DQTYPE_USER:
                qtype = USRQUOTA;
                break;
        case XFS_DQTYPE_GROUP:
                qtype = GRPQUOTA;
                break;
        default:
                return;
        }

        quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
                           mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now, and will never
 * decrease the quota reservation, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
        struct xfs_dquot_res *res,
        struct xfs_quota_limits *qlim,
        int64_t delta,
        bool *fatal)
{
        xfs_qcnt_t hardlimit = res->hardlimit;
        xfs_qcnt_t softlimit = res->softlimit;
        xfs_qcnt_t total_count = res->reserved + delta;

        BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN     + 3);
        BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
        BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN     + 3);

        *fatal = false;
        if (delta <= 0)
                return QUOTA_NL_NOWARN;

        if (!hardlimit)
                hardlimit = qlim->hard;
        if (!softlimit)
                softlimit = qlim->soft;

        if (hardlimit && total_count > hardlimit) {
                *fatal = true;
                return QUOTA_NL_IHARDWARN;
        }

        if (softlimit && total_count > softlimit) {
                time64_t now = ktime_get_real_seconds();

                if ((res->timer != 0 && now > res->timer) ||
                    (res->warnings != 0 && res->warnings >= qlim->warn)) {
                        *fatal = true;
                        return QUOTA_NL_ISOFTLONGWARN;
                }

                res->warnings++;
                return QUOTA_NL_ISOFTWARN;
        }

        return QUOTA_NL_NOWARN;
}
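
/*
 * Worked example for the check above (illustrative numbers): with
 * res->reserved = 90, delta = 20 and a hard limit of 100, total_count is
 * 110, so the check returns QUOTA_NL_IHARDWARN with *fatal set. If only
 * a soft limit of 100 were configured and its timer had not yet expired,
 * the same reservation would bump res->warnings and return the non-fatal
 * QUOTA_NL_ISOFTWARN instead.
 */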

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        struct xfs_trans *tp,
        struct xfs_mount *mp,
        struct xfs_dquot *dqp,
        int64_t nblks,
        long ninos,
        uint flags)
{
        struct xfs_quotainfo *q = mp->m_quotainfo;
        struct xfs_def_quota *defq;
        struct xfs_dquot_res *blkres;
        struct xfs_quota_limits *qlim;

        xfs_dqlock(dqp);

        defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                blkres = &dqp->q_blk;
                qlim = &defq->blk;
        } else {
                blkres = &dqp->q_rtb;
                qlim = &defq->rtb;
        }

        if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
            xfs_dquot_is_enforced(dqp)) {
                int quota_nl;
                bool fatal;

                /*
                 * dquot is locked already. See if we'd go over the hardlimit
                 * or exceed the timelimit if we'd reserve resources.
                 */
                quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
                if (quota_nl != QUOTA_NL_NOWARN) {
                        /*
                         * Quota block warning codes are 3 more than the inode
                         * codes, which we check above.
                         */
                        xfs_quota_warn(mp, dqp, quota_nl + 3);
                        if (fatal)
                                goto error_return;
                }

                quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
                                &fatal);
                if (quota_nl != QUOTA_NL_NOWARN) {
                        xfs_quota_warn(mp, dqp, quota_nl);
                        if (fatal)
                                goto error_return;
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_blk.reserved = q_blk.count + resv
         */
        blkres->reserved += (xfs_qcnt_t)nblks;
        dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

        /*
         * note the reservation amt in the trans struct too,
         * so that the transaction knows how much was reserved by
         * it against this particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(tp->t_dqinfo);
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                if (nblks != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            flags & XFS_QMOPT_RESBLK_MASK,
                                            nblks);
                if (ninos != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            XFS_TRANS_DQ_RES_INOS,
                                            ninos);
        }

        ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
        ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
        ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);

        xfs_dqunlock(dqp);
        return 0;

error_return:
        xfs_dqunlock(dqp);
        if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
                return -ENOSPC;
        return -EDQUOT;
}

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *         XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
        struct xfs_trans *tp,
        struct xfs_mount *mp,
        struct xfs_dquot *udqp,
        struct xfs_dquot *gdqp,
        struct xfs_dquot *pdqp,
        int64_t nblks,
        long ninos,
        uint flags)
{
        int error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        if (tp && tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
                if (error)
                        return error;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_usr;
        }

        if (pdqp) {
                error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
                if (error)
                        goto unwind_grp;
        }

        /*
         * Didn't change anything critical, so, no need to log
         */
        return 0;

unwind_grp:
        flags |= XFS_QMOPT_FORCE_RES;
        if (gdqp)
                xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
        flags |= XFS_QMOPT_FORCE_RES;
        if (udqp)
                xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
        return error;
}

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans *tp,
        struct xfs_inode *ip,
        int64_t nblks,
        long ninos,
        uint flags)
{
        struct xfs_mount *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
               (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);

        /*
         * Reserve nblks against these dquots, with trans as the mediator.
         */
        return xfs_trans_reserve_quota_bydquots(tp, mp,
                                                ip->i_udquot, ip->i_gdquot,
                                                ip->i_pdquot,
                                                nblks, ninos, flags);
}
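
/*
 * Minimal caller sketch for the wrapper above (assumes an already allocated
 * transaction and an inode locked XFS_ILOCK_EXCL; the resblks value and the
 * error label are placeholders):
 *
 *      error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
 *                      XFS_TRANS_DQ_RES_BLKS);
 *      if (error)
 *              goto out_trans_cancel;
 *
 * OR-ing XFS_QMOPT_FORCE_RES into the flags skips the limit checks, as
 * chown-style callers do.
 */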

/*
 * This routine is called to allocate a quotaoff log item.
 */
struct xfs_qoff_logitem *
xfs_trans_get_qoff_item(
        struct xfs_trans *tp,
        struct xfs_qoff_logitem *startqoff,
        uint flags)
{
        struct xfs_qoff_logitem *q;

        ASSERT(tp != NULL);

        q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
        ASSERT(q != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        xfs_trans_add_item(tp, &q->qql_item);
        return q;
}

/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed. The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
        struct xfs_trans *tp,
        struct xfs_qoff_logitem *qlp)
{
        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &qlp->qql_item.li_flags);
}

STATIC void
xfs_trans_alloc_dqinfo(
        xfs_trans_t *tp)
{
        tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone,
                                         GFP_KERNEL | __GFP_NOFAIL);
}

void
xfs_trans_free_dqinfo(
        xfs_trans_t *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_cache_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}