quota.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  4. * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
  5. */
  6. /*
  7. * Quota change tags are associated with each transaction that allocates or
  8. * deallocates space. Those changes are accumulated locally to each node (in a
  9. * per-node file) and then are periodically synced to the quota file. This
  10. * avoids the bottleneck of constantly touching the quota file, but introduces
  11. * fuzziness in the current usage value of IDs that are being used on different
  12. * nodes in the cluster simultaneously. So, it is possible for a user on
  13. * multiple nodes to overrun their quota, but that overrun is controllable.
  14. * Since quota tags are part of transactions, there is no need for a quota check
  15. * program to be run on node crashes or anything like that.
  16. *
  17. * There are a couple of knobs that let the administrator manage the quota
  18. * fuzziness. "quota_quantum" sets the maximum time a quota change can be
  19. * sitting on one node before being synced to the quota file. (The default is
  20. * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
  21. * of quota file syncs increases as the user moves closer to their limit. The
  22. * more frequent the syncs, the more accurate the quota enforcement, but that
  23. * means that there is more contention between the nodes for the quota file.
  24. * The default value is one. This sets the maximum theoretical quota overrun
  25. * (with an infinite number of nodes with infinite bandwidth) to twice the user's limit. (In
  26. * practice, the maximum overrun you see should be much less.) A "quota_scale"
  27. * number greater than one makes quota syncs more frequent and reduces the
  28. * maximum overrun. Numbers less than one (but greater than zero) make quota
  29. * syncs less frequent.
  30. *
  31. * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
  32. * the quota file, so it is not being constantly read.
  33. */
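/*
 * Roughly speaking, "quota_quantum" corresponds to the gt_quota_quantum
 * tunable consumed by gfs2_quotad() below, while "quota_scale" is kept as
 * the fraction gt_quota_scale_num / gt_quota_scale_den and applied in
 * need_sync() when deciding whether a locally accumulated change should be
 * flushed to the quota file ahead of the quantum.
 */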
  34. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  35. #include <linux/sched.h>
  36. #include <linux/slab.h>
  37. #include <linux/mm.h>
  38. #include <linux/spinlock.h>
  39. #include <linux/completion.h>
  40. #include <linux/buffer_head.h>
  41. #include <linux/sort.h>
  42. #include <linux/fs.h>
  43. #include <linux/bio.h>
  44. #include <linux/gfs2_ondisk.h>
  45. #include <linux/kthread.h>
  46. #include <linux/freezer.h>
  47. #include <linux/quota.h>
  48. #include <linux/dqblk_xfs.h>
  49. #include <linux/lockref.h>
  50. #include <linux/list_lru.h>
  51. #include <linux/rcupdate.h>
  52. #include <linux/rculist_bl.h>
  53. #include <linux/bit_spinlock.h>
  54. #include <linux/jhash.h>
  55. #include <linux/vmalloc.h>
  56. #include "gfs2.h"
  57. #include "incore.h"
  58. #include "bmap.h"
  59. #include "glock.h"
  60. #include "glops.h"
  61. #include "log.h"
  62. #include "meta_io.h"
  63. #include "quota.h"
  64. #include "rgrp.h"
  65. #include "super.h"
  66. #include "trans.h"
  67. #include "inode.h"
  68. #include "util.h"
  69. #define GFS2_QD_HASH_SHIFT 12
  70. #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
  71. #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
  72. /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
  73. /* -> sd_bitmap_lock */
  74. static DEFINE_SPINLOCK(qd_lock);
  75. struct list_lru gfs2_qd_lru;
  76. static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
  77. static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
  78. const struct kqid qid)
  79. {
  80. unsigned int h;
  81. h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
  82. h = jhash(&qid, sizeof(struct kqid), h);
  83. return h & GFS2_QD_HASH_MASK;
  84. }
  85. static inline void spin_lock_bucket(unsigned int hash)
  86. {
  87. hlist_bl_lock(&qd_hash_table[hash]);
  88. }
  89. static inline void spin_unlock_bucket(unsigned int hash)
  90. {
  91. hlist_bl_unlock(&qd_hash_table[hash]);
  92. }
  93. static void gfs2_qd_dealloc(struct rcu_head *rcu)
  94. {
  95. struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
  96. kmem_cache_free(gfs2_quotad_cachep, qd);
  97. }
  98. static void gfs2_qd_dispose(struct list_head *list)
  99. {
  100. struct gfs2_quota_data *qd;
  101. struct gfs2_sbd *sdp;
  102. while (!list_empty(list)) {
  103. qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
  104. sdp = qd->qd_gl->gl_name.ln_sbd;
  105. list_del(&qd->qd_lru);
  106. /* Free from the filesystem-specific list */
  107. spin_lock(&qd_lock);
  108. list_del(&qd->qd_list);
  109. spin_unlock(&qd_lock);
  110. spin_lock_bucket(qd->qd_hash);
  111. hlist_bl_del_rcu(&qd->qd_hlist);
  112. spin_unlock_bucket(qd->qd_hash);
  113. gfs2_assert_warn(sdp, !qd->qd_change);
  114. gfs2_assert_warn(sdp, !qd->qd_slot_count);
  115. gfs2_assert_warn(sdp, !qd->qd_bh_count);
  116. gfs2_glock_put(qd->qd_gl);
  117. atomic_dec(&sdp->sd_quota_count);
  118. /* Delete it from the common reclaim list */
  119. call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
  120. }
  121. }
  122. static enum lru_status gfs2_qd_isolate(struct list_head *item,
  123. struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
  124. {
  125. struct list_head *dispose = arg;
  126. struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
  127. if (!spin_trylock(&qd->qd_lockref.lock))
  128. return LRU_SKIP;
  129. if (qd->qd_lockref.count == 0) {
  130. lockref_mark_dead(&qd->qd_lockref);
  131. list_lru_isolate_move(lru, &qd->qd_lru, dispose);
  132. }
  133. spin_unlock(&qd->qd_lockref.lock);
  134. return LRU_REMOVED;
  135. }
  136. static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
  137. struct shrink_control *sc)
  138. {
  139. LIST_HEAD(dispose);
  140. unsigned long freed;
  141. if (!(sc->gfp_mask & __GFP_FS))
  142. return SHRINK_STOP;
  143. freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
  144. gfs2_qd_isolate, &dispose);
  145. gfs2_qd_dispose(&dispose);
  146. return freed;
  147. }
  148. static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
  149. struct shrink_control *sc)
  150. {
  151. return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
  152. }
  153. struct shrinker gfs2_qd_shrinker = {
  154. .count_objects = gfs2_qd_shrink_count,
  155. .scan_objects = gfs2_qd_shrink_scan,
  156. .seeks = DEFAULT_SEEKS,
  157. .flags = SHRINKER_NUMA_AWARE,
  158. };
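/*
 * Layout of the quota file, as implied by qd2index()/qd2offset(): user and
 * group quotas for the same numeric ID are interleaved, with the user entry
 * at index 2 * id and the group entry at index 2 * id + 1, each entry being
 * sizeof(struct gfs2_quota) bytes.  For example (hypothetical ID), the group
 * quota for GID 1000 lives at byte offset 2001 * sizeof(struct gfs2_quota).
 */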
  159. static u64 qd2index(struct gfs2_quota_data *qd)
  160. {
  161. struct kqid qid = qd->qd_id;
  162. return (2 * (u64)from_kqid(&init_user_ns, qid)) +
  163. ((qid.type == USRQUOTA) ? 0 : 1);
  164. }
  165. static u64 qd2offset(struct gfs2_quota_data *qd)
  166. {
  167. u64 offset;
  168. offset = qd2index(qd);
  169. offset *= sizeof(struct gfs2_quota);
  170. return offset;
  171. }
  172. static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
  173. {
  174. struct gfs2_quota_data *qd;
  175. int error;
  176. qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
  177. if (!qd)
  178. return NULL;
  179. qd->qd_sbd = sdp;
  180. qd->qd_lockref.count = 1;
  181. spin_lock_init(&qd->qd_lockref.lock);
  182. qd->qd_id = qid;
  183. qd->qd_slot = -1;
  184. INIT_LIST_HEAD(&qd->qd_lru);
  185. qd->qd_hash = hash;
  186. error = gfs2_glock_get(sdp, qd2index(qd),
  187. &gfs2_quota_glops, CREATE, &qd->qd_gl);
  188. if (error)
  189. goto fail;
  190. return qd;
  191. fail:
  192. kmem_cache_free(gfs2_quotad_cachep, qd);
  193. return NULL;
  194. }
  195. static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
  196. const struct gfs2_sbd *sdp,
  197. struct kqid qid)
  198. {
  199. struct gfs2_quota_data *qd;
  200. struct hlist_bl_node *h;
  201. hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
  202. if (!qid_eq(qd->qd_id, qid))
  203. continue;
  204. if (qd->qd_sbd != sdp)
  205. continue;
  206. if (lockref_get_not_dead(&qd->qd_lockref)) {
  207. list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
  208. return qd;
  209. }
  210. }
  211. return NULL;
  212. }
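/*
 * qd_get() follows the usual lockless-lookup-then-insert pattern: try an RCU
 * search of the hash bucket first; on a miss, allocate a new gfs2_quota_data,
 * retake qd_lock and the bucket lock, search again, and either install the
 * new entry or, if another thread raced in, drop the fresh allocation and
 * return the entry that won.
 */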
  213. static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
  214. struct gfs2_quota_data **qdp)
  215. {
  216. struct gfs2_quota_data *qd, *new_qd;
  217. unsigned int hash = gfs2_qd_hash(sdp, qid);
  218. rcu_read_lock();
  219. *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
  220. rcu_read_unlock();
  221. if (qd)
  222. return 0;
  223. new_qd = qd_alloc(hash, sdp, qid);
  224. if (!new_qd)
  225. return -ENOMEM;
  226. spin_lock(&qd_lock);
  227. spin_lock_bucket(hash);
  228. *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
  229. if (qd == NULL) {
  230. *qdp = new_qd;
  231. list_add(&new_qd->qd_list, &sdp->sd_quota_list);
  232. hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
  233. atomic_inc(&sdp->sd_quota_count);
  234. }
  235. spin_unlock_bucket(hash);
  236. spin_unlock(&qd_lock);
  237. if (qd) {
  238. gfs2_glock_put(new_qd->qd_gl);
  239. kmem_cache_free(gfs2_quotad_cachep, new_qd);
  240. }
  241. return 0;
  242. }
  243. static void qd_hold(struct gfs2_quota_data *qd)
  244. {
  245. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  246. gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
  247. lockref_get(&qd->qd_lockref);
  248. }
  249. static void qd_put(struct gfs2_quota_data *qd)
  250. {
  251. if (lockref_put_or_lock(&qd->qd_lockref))
  252. return;
  253. qd->qd_lockref.count = 0;
  254. list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
  255. spin_unlock(&qd->qd_lockref.lock);
  256. }
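/*
 * Each pending change owns a "slot" in the per-node quota change file:
 * slot_get() claims a free bit in sd_quota_bitmap, and bh_get() later maps
 * slot N to record N % sd_qc_per_block of block N / sd_qc_per_block of that
 * file.  For example (hypothetical value), with sd_qc_per_block == 62, slot
 * 100 is record 38 of block 1.
 */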
  257. static int slot_get(struct gfs2_quota_data *qd)
  258. {
  259. struct gfs2_sbd *sdp = qd->qd_sbd;
  260. unsigned int bit;
  261. int error = 0;
  262. spin_lock(&sdp->sd_bitmap_lock);
  263. if (qd->qd_slot_count != 0)
  264. goto out;
  265. error = -ENOSPC;
  266. bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
  267. if (bit < sdp->sd_quota_slots) {
  268. set_bit(bit, sdp->sd_quota_bitmap);
  269. qd->qd_slot = bit;
  270. error = 0;
  271. out:
  272. qd->qd_slot_count++;
  273. }
  274. spin_unlock(&sdp->sd_bitmap_lock);
  275. return error;
  276. }
  277. static void slot_hold(struct gfs2_quota_data *qd)
  278. {
  279. struct gfs2_sbd *sdp = qd->qd_sbd;
  280. spin_lock(&sdp->sd_bitmap_lock);
  281. gfs2_assert(sdp, qd->qd_slot_count);
  282. qd->qd_slot_count++;
  283. spin_unlock(&sdp->sd_bitmap_lock);
  284. }
  285. static void slot_put(struct gfs2_quota_data *qd)
  286. {
  287. struct gfs2_sbd *sdp = qd->qd_sbd;
  288. spin_lock(&sdp->sd_bitmap_lock);
  289. gfs2_assert(sdp, qd->qd_slot_count);
  290. if (!--qd->qd_slot_count) {
  291. BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
  292. qd->qd_slot = -1;
  293. }
  294. spin_unlock(&sdp->sd_bitmap_lock);
  295. }
  296. static int bh_get(struct gfs2_quota_data *qd)
  297. {
  298. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  299. struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
  300. unsigned int block, offset;
  301. struct buffer_head *bh;
  302. int error;
  303. struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
  304. mutex_lock(&sdp->sd_quota_mutex);
  305. if (qd->qd_bh_count++) {
  306. mutex_unlock(&sdp->sd_quota_mutex);
  307. return 0;
  308. }
  309. block = qd->qd_slot / sdp->sd_qc_per_block;
  310. offset = qd->qd_slot % sdp->sd_qc_per_block;
  311. bh_map.b_size = BIT(ip->i_inode.i_blkbits);
  312. error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
  313. if (error)
  314. goto fail;
  315. error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
  316. if (error)
  317. goto fail;
  318. error = -EIO;
  319. if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
  320. goto fail_brelse;
  321. qd->qd_bh = bh;
  322. qd->qd_bh_qc = (struct gfs2_quota_change *)
  323. (bh->b_data + sizeof(struct gfs2_meta_header) +
  324. offset * sizeof(struct gfs2_quota_change));
  325. mutex_unlock(&sdp->sd_quota_mutex);
  326. return 0;
  327. fail_brelse:
  328. brelse(bh);
  329. fail:
  330. qd->qd_bh_count--;
  331. mutex_unlock(&sdp->sd_quota_mutex);
  332. return error;
  333. }
  334. static void bh_put(struct gfs2_quota_data *qd)
  335. {
  336. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  337. mutex_lock(&sdp->sd_quota_mutex);
  338. gfs2_assert(sdp, qd->qd_bh_count);
  339. if (!--qd->qd_bh_count) {
  340. brelse(qd->qd_bh);
  341. qd->qd_bh = NULL;
  342. qd->qd_bh_qc = NULL;
  343. }
  344. mutex_unlock(&sdp->sd_quota_mutex);
  345. }
  346. static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
  347. u64 *sync_gen)
  348. {
  349. if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
  350. !test_bit(QDF_CHANGE, &qd->qd_flags) ||
  351. (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
  352. return 0;
  353. if (!lockref_get_not_dead(&qd->qd_lockref))
  354. return 0;
  355. list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
  356. set_bit(QDF_LOCKED, &qd->qd_flags);
  357. qd->qd_change_sync = qd->qd_change;
  358. slot_hold(qd);
  359. return 1;
  360. }
  361. static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
  362. {
  363. struct gfs2_quota_data *qd = NULL;
  364. int error;
  365. int found = 0;
  366. *qdp = NULL;
  367. if (sb_rdonly(sdp->sd_vfs))
  368. return 0;
  369. spin_lock(&qd_lock);
  370. list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
  371. found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
  372. if (found)
  373. break;
  374. }
  375. if (!found)
  376. qd = NULL;
  377. spin_unlock(&qd_lock);
  378. if (qd) {
  379. gfs2_assert_warn(sdp, qd->qd_change_sync);
  380. error = bh_get(qd);
  381. if (error) {
  382. clear_bit(QDF_LOCKED, &qd->qd_flags);
  383. slot_put(qd);
  384. qd_put(qd);
  385. return error;
  386. }
  387. }
  388. *qdp = qd;
  389. return 0;
  390. }
  391. static void qd_unlock(struct gfs2_quota_data *qd)
  392. {
  393. gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
  394. test_bit(QDF_LOCKED, &qd->qd_flags));
  395. clear_bit(QDF_LOCKED, &qd->qd_flags);
  396. bh_put(qd);
  397. slot_put(qd);
  398. qd_put(qd);
  399. }
  400. static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
  401. struct gfs2_quota_data **qdp)
  402. {
  403. int error;
  404. error = qd_get(sdp, qid, qdp);
  405. if (error)
  406. return error;
  407. error = slot_get(*qdp);
  408. if (error)
  409. goto fail;
  410. error = bh_get(*qdp);
  411. if (error)
  412. goto fail_slot;
  413. return 0;
  414. fail_slot:
  415. slot_put(*qdp);
  416. fail:
  417. qd_put(*qdp);
  418. return error;
  419. }
  420. static void qdsb_put(struct gfs2_quota_data *qd)
  421. {
  422. bh_put(qd);
  423. slot_put(qd);
  424. qd_put(qd);
  425. }
  426. /**
  427. * gfs2_qa_get - make sure we have a quota allocations data structure,
  428. * if necessary
  429. * @ip: the inode for this reservation
  430. */
  431. int gfs2_qa_get(struct gfs2_inode *ip)
  432. {
  433. int error = 0;
  434. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  435. if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
  436. return 0;
  437. down_write(&ip->i_rw_mutex);
  438. if (ip->i_qadata == NULL) {
  439. ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
  440. if (!ip->i_qadata) {
  441. error = -ENOMEM;
  442. goto out;
  443. }
  444. }
  445. ip->i_qadata->qa_ref++;
  446. out:
  447. up_write(&ip->i_rw_mutex);
  448. return error;
  449. }
  450. void gfs2_qa_put(struct gfs2_inode *ip)
  451. {
  452. down_write(&ip->i_rw_mutex);
  453. if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
  454. kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
  455. ip->i_qadata = NULL;
  456. }
  457. up_write(&ip->i_rw_mutex);
  458. }
  459. int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
  460. {
  461. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  462. struct gfs2_quota_data **qd;
  463. int error;
  464. if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
  465. return 0;
  466. error = gfs2_qa_get(ip);
  467. if (error)
  468. return error;
  469. qd = ip->i_qadata->qa_qd;
  470. if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
  471. gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
  472. error = -EIO;
  473. goto out;
  474. }
  475. error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
  476. if (error)
  477. goto out_unhold;
  478. ip->i_qadata->qa_qd_num++;
  479. qd++;
  480. error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
  481. if (error)
  482. goto out_unhold;
  483. ip->i_qadata->qa_qd_num++;
  484. qd++;
  485. if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
  486. !uid_eq(uid, ip->i_inode.i_uid)) {
  487. error = qdsb_get(sdp, make_kqid_uid(uid), qd);
  488. if (error)
  489. goto out_unhold;
  490. ip->i_qadata->qa_qd_num++;
  491. qd++;
  492. }
  493. if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
  494. !gid_eq(gid, ip->i_inode.i_gid)) {
  495. error = qdsb_get(sdp, make_kqid_gid(gid), qd);
  496. if (error)
  497. goto out_unhold;
  498. ip->i_qadata->qa_qd_num++;
  499. qd++;
  500. }
  501. out_unhold:
  502. if (error)
  503. gfs2_quota_unhold(ip);
  504. out:
  505. return error;
  506. }
  507. void gfs2_quota_unhold(struct gfs2_inode *ip)
  508. {
  509. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  510. u32 x;
  511. if (ip->i_qadata == NULL)
  512. return;
  513. gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
  514. for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
  515. qdsb_put(ip->i_qadata->qa_qd[x]);
  516. ip->i_qadata->qa_qd[x] = NULL;
  517. }
  518. ip->i_qadata->qa_qd_num = 0;
  519. gfs2_qa_put(ip);
  520. }
  521. static int sort_qd(const void *a, const void *b)
  522. {
  523. const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
  524. const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
  525. if (qid_lt(qd_a->qd_id, qd_b->qd_id))
  526. return -1;
  527. if (qid_lt(qd_b->qd_id, qd_a->qd_id))
  528. return 1;
  529. return 0;
  530. }
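/*
 * do_qc() applies a local usage delta: it updates the gfs2_quota_change
 * record in this node's quota change file (adding the buffer to the
 * transaction via gfs2_trans_add_meta()) and mirrors the new total into
 * qd->qd_change.  The first nonzero delta takes a qd reference and a slot
 * hold (QDF_CHANGE set); when the accumulated change returns to zero, both
 * are released again.
 */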
  531. static void do_qc(struct gfs2_quota_data *qd, s64 change)
  532. {
  533. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  534. struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
  535. struct gfs2_quota_change *qc = qd->qd_bh_qc;
  536. s64 x;
  537. mutex_lock(&sdp->sd_quota_mutex);
  538. gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);
  539. if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
  540. qc->qc_change = 0;
  541. qc->qc_flags = 0;
  542. if (qd->qd_id.type == USRQUOTA)
  543. qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
  544. qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
  545. }
  546. x = be64_to_cpu(qc->qc_change) + change;
  547. qc->qc_change = cpu_to_be64(x);
  548. spin_lock(&qd_lock);
  549. qd->qd_change = x;
  550. spin_unlock(&qd_lock);
  551. if (!x) {
  552. gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
  553. clear_bit(QDF_CHANGE, &qd->qd_flags);
  554. qc->qc_flags = 0;
  555. qc->qc_id = 0;
  556. slot_put(qd);
  557. qd_put(qd);
  558. } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
  559. qd_hold(qd);
  560. slot_hold(qd);
  561. }
  562. if (change < 0) /* Reset quiet flag if we freed some blocks */
  563. clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
  564. mutex_unlock(&sdp->sd_quota_mutex);
  565. }
  566. static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
  567. unsigned off, void *buf, unsigned bytes)
  568. {
  569. struct inode *inode = &ip->i_inode;
  570. struct gfs2_sbd *sdp = GFS2_SB(inode);
  571. struct address_space *mapping = inode->i_mapping;
  572. struct page *page;
  573. struct buffer_head *bh;
  574. void *kaddr;
  575. u64 blk;
  576. unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
  577. unsigned to_write = bytes, pg_off = off;
  578. int done = 0;
  579. blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
  580. boff = off % bsize;
  581. page = find_or_create_page(mapping, index, GFP_NOFS);
  582. if (!page)
  583. return -ENOMEM;
  584. if (!page_has_buffers(page))
  585. create_empty_buffers(page, bsize, 0);
  586. bh = page_buffers(page);
  587. while (!done) {
  588. /* Find the beginning block within the page */
  589. if (pg_off >= ((bnum * bsize) + bsize)) {
  590. bh = bh->b_this_page;
  591. bnum++;
  592. blk++;
  593. continue;
  594. }
  595. if (!buffer_mapped(bh)) {
  596. gfs2_block_map(inode, blk, bh, 1);
  597. if (!buffer_mapped(bh))
  598. goto unlock_out;
  599. /* If it's a newly allocated disk block, zero it */
  600. if (buffer_new(bh))
  601. zero_user(page, bnum * bsize, bh->b_size);
  602. }
  603. if (PageUptodate(page))
  604. set_buffer_uptodate(bh);
  605. if (!buffer_uptodate(bh)) {
  606. ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
  607. wait_on_buffer(bh);
  608. if (!buffer_uptodate(bh))
  609. goto unlock_out;
  610. }
  611. if (gfs2_is_jdata(ip))
  612. gfs2_trans_add_data(ip->i_gl, bh);
  613. else
  614. gfs2_ordered_add_inode(ip);
  615. /* If we need to write to the next block as well */
  616. if (to_write > (bsize - boff)) {
  617. pg_off += (bsize - boff);
  618. to_write -= (bsize - boff);
  619. boff = pg_off % bsize;
  620. continue;
  621. }
  622. done = 1;
  623. }
  624. /* Write to the page, now that we have setup the buffer(s) */
  625. kaddr = kmap_atomic(page);
  626. memcpy(kaddr + off, buf, bytes);
  627. flush_dcache_page(page);
  628. kunmap_atomic(kaddr);
  629. unlock_page(page);
  630. put_page(page);
  631. return 0;
  632. unlock_out:
  633. unlock_page(page);
  634. put_page(page);
  635. return -EIO;
  636. }
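/*
 * A struct gfs2_quota entry may straddle a page boundary, in which case the
 * write is split across two calls to gfs2_write_buf_to_page().  For example
 * (hypothetical numbers), with PAGE_SIZE == 4096, loc == 4090 and a 16 byte
 * write: pg_off == 4090 and overflow == (4090 + 16) - 4096 == 10, so 6 bytes
 * land at the end of page pg_beg and the remaining 10 bytes at offset 0 of
 * page pg_beg + 1.
 */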
  637. static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
  638. loff_t loc)
  639. {
  640. unsigned long pg_beg;
  641. unsigned pg_off, nbytes, overflow = 0;
  642. int pg_oflow = 0, error;
  643. void *ptr;
  644. nbytes = sizeof(struct gfs2_quota);
  645. pg_beg = loc >> PAGE_SHIFT;
  646. pg_off = offset_in_page(loc);
  647. /* If the quota straddles a page boundary, split the write in two */
  648. if ((pg_off + nbytes) > PAGE_SIZE) {
  649. pg_oflow = 1;
  650. overflow = (pg_off + nbytes) - PAGE_SIZE;
  651. }
  652. ptr = qp;
  653. error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
  654. nbytes - overflow);
  655. /* If there's an overflow, write the remaining bytes to the next page */
  656. if (!error && pg_oflow)
  657. error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
  658. ptr + nbytes - overflow,
  659. overflow);
  660. return error;
  661. }
  662. /**
  663. * gfs2_adjust_quota - adjust record of current block usage
  664. * @ip: The quota inode
  665. * @loc: Offset of the entry in the quota file
  666. * @change: The amount of usage change to record
  667. * @qd: The quota data
  668. * @fdq: The updated limits to record
  669. *
  670. * This function was mostly borrowed from gfs2_block_truncate_page which was
  671. * in turn mostly borrowed from ext3
  672. *
  673. * Returns: 0 or -ve on error
  674. */
  675. static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
  676. s64 change, struct gfs2_quota_data *qd,
  677. struct qc_dqblk *fdq)
  678. {
  679. struct inode *inode = &ip->i_inode;
  680. struct gfs2_sbd *sdp = GFS2_SB(inode);
  681. struct gfs2_quota q;
  682. int err;
  683. u64 size;
  684. if (gfs2_is_stuffed(ip)) {
  685. err = gfs2_unstuff_dinode(ip, NULL);
  686. if (err)
  687. return err;
  688. }
  689. memset(&q, 0, sizeof(struct gfs2_quota));
  690. err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
  691. if (err < 0)
  692. return err;
  693. loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
  694. err = -EIO;
  695. be64_add_cpu(&q.qu_value, change);
  696. if (((s64)be64_to_cpu(q.qu_value)) < 0)
  697. q.qu_value = 0; /* Never go negative on quota usage */
  698. qd->qd_qb.qb_value = q.qu_value;
  699. if (fdq) {
  700. if (fdq->d_fieldmask & QC_SPC_SOFT) {
  701. q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
  702. qd->qd_qb.qb_warn = q.qu_warn;
  703. }
  704. if (fdq->d_fieldmask & QC_SPC_HARD) {
  705. q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
  706. qd->qd_qb.qb_limit = q.qu_limit;
  707. }
  708. if (fdq->d_fieldmask & QC_SPACE) {
  709. q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
  710. qd->qd_qb.qb_value = q.qu_value;
  711. }
  712. }
  713. err = gfs2_write_disk_quota(ip, &q, loc);
  714. if (!err) {
  715. size = loc + sizeof(struct gfs2_quota);
  716. if (size > inode->i_size)
  717. i_size_write(inode, size);
  718. inode->i_mtime = inode->i_atime = current_time(inode);
  719. mark_inode_dirty(inode);
  720. set_bit(QDF_REFRESH, &qd->qd_flags);
  721. }
  722. return err;
  723. }
  724. static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
  725. {
  726. struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
  727. struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
  728. struct gfs2_alloc_parms ap = { .aflags = 0, };
  729. unsigned int data_blocks, ind_blocks;
  730. struct gfs2_holder *ghs, i_gh;
  731. unsigned int qx, x;
  732. struct gfs2_quota_data *qd;
  733. unsigned reserved;
  734. loff_t offset;
  735. unsigned int nalloc = 0, blocks;
  736. int error;
  737. error = gfs2_qa_get(ip);
  738. if (error)
  739. return error;
  740. gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
  741. &data_blocks, &ind_blocks);
  742. ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
  743. if (!ghs) {
  744. error = -ENOMEM;
  745. goto out;
  746. }
  747. sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
  748. inode_lock(&ip->i_inode);
  749. for (qx = 0; qx < num_qd; qx++) {
  750. error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
  751. GL_NOCACHE, &ghs[qx]);
  752. if (error)
  753. goto out_dq;
  754. }
  755. error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
  756. if (error)
  757. goto out_dq;
  758. for (x = 0; x < num_qd; x++) {
  759. offset = qd2offset(qda[x]);
  760. if (gfs2_write_alloc_required(ip, offset,
  761. sizeof(struct gfs2_quota)))
  762. nalloc++;
  763. }
  764. /*
  765. * 1 blk for unstuffing inode if stuffed. We add this extra
  766. * block to the reservation unconditionally. If the inode
  767. * doesn't need unstuffing, the block will be released to the
  768. * rgrp since it won't be allocated during the transaction
  769. */
  770. /* +3 in the end for unstuffing block, inode size update block
  771. * and another block in case quota straddles page boundary and
  772. * two blocks need to be updated instead of 1 */
  773. blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
  774. reserved = 1 + (nalloc * (data_blocks + ind_blocks));
  775. ap.target = reserved;
  776. error = gfs2_inplace_reserve(ip, &ap);
  777. if (error)
  778. goto out_alloc;
  779. if (nalloc)
  780. blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
  781. error = gfs2_trans_begin(sdp, blocks, 0);
  782. if (error)
  783. goto out_ipres;
  784. for (x = 0; x < num_qd; x++) {
  785. qd = qda[x];
  786. offset = qd2offset(qd);
  787. error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
  788. if (error)
  789. goto out_end_trans;
  790. do_qc(qd, -qd->qd_change_sync);
  791. set_bit(QDF_REFRESH, &qd->qd_flags);
  792. }
  793. error = 0;
  794. out_end_trans:
  795. gfs2_trans_end(sdp);
  796. out_ipres:
  797. gfs2_inplace_release(ip);
  798. out_alloc:
  799. gfs2_glock_dq_uninit(&i_gh);
  800. out_dq:
  801. while (qx--)
  802. gfs2_glock_dq_uninit(&ghs[qx]);
  803. inode_unlock(&ip->i_inode);
  804. kfree(ghs);
  805. gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
  806. GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
  807. out:
  808. gfs2_qa_put(ip);
  809. return error;
  810. }
  811. static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
  812. {
  813. struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
  814. struct gfs2_quota q;
  815. struct gfs2_quota_lvb *qlvb;
  816. loff_t pos;
  817. int error;
  818. memset(&q, 0, sizeof(struct gfs2_quota));
  819. pos = qd2offset(qd);
  820. error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
  821. if (error < 0)
  822. return error;
  823. qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
  824. qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
  825. qlvb->__pad = 0;
  826. qlvb->qb_limit = q.qu_limit;
  827. qlvb->qb_warn = q.qu_warn;
  828. qlvb->qb_value = q.qu_value;
  829. qd->qd_qb = *qlvb;
  830. return 0;
  831. }
  832. static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
  833. struct gfs2_holder *q_gh)
  834. {
  835. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  836. struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
  837. struct gfs2_holder i_gh;
  838. int error;
  839. restart:
  840. error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
  841. if (error)
  842. return error;
  843. if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
  844. force_refresh = FORCE;
  845. qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
  846. if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
  847. gfs2_glock_dq_uninit(q_gh);
  848. error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
  849. GL_NOCACHE, q_gh);
  850. if (error)
  851. return error;
  852. error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
  853. if (error)
  854. goto fail;
  855. error = update_qd(sdp, qd);
  856. if (error)
  857. goto fail_gunlock;
  858. gfs2_glock_dq_uninit(&i_gh);
  859. gfs2_glock_dq_uninit(q_gh);
  860. force_refresh = 0;
  861. goto restart;
  862. }
  863. return 0;
  864. fail_gunlock:
  865. gfs2_glock_dq_uninit(&i_gh);
  866. fail:
  867. gfs2_glock_dq_uninit(q_gh);
  868. return error;
  869. }
  870. int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
  871. {
  872. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  873. struct gfs2_quota_data *qd;
  874. u32 x;
  875. int error = 0;
  876. if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
  877. return 0;
  878. error = gfs2_quota_hold(ip, uid, gid);
  879. if (error)
  880. return error;
  881. sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num,
  882. sizeof(struct gfs2_quota_data *), sort_qd, NULL);
  883. for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
  884. qd = ip->i_qadata->qa_qd[x];
  885. error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]);
  886. if (error)
  887. break;
  888. }
  889. if (!error)
  890. set_bit(GIF_QD_LOCKED, &ip->i_flags);
  891. else {
  892. while (x--)
  893. gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
  894. gfs2_quota_unhold(ip);
  895. }
  896. return error;
  897. }
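/*
 * need_sync() scales the local unsynced change by the number of journals and
 * by quota_scale before comparing against the hard limit, roughly as if every
 * node were holding a similar change.  For example (hypothetical numbers):
 * 4 journals, quota_scale 1/1, a hard limit of 1000 blocks, a synced value of
 * 900 and a local change of +30 give 900 + 30 * 4 * 1 / 1 == 1020 >= 1000,
 * so the change is synced now rather than waiting for the quantum.
 */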
  898. static int need_sync(struct gfs2_quota_data *qd)
  899. {
  900. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  901. struct gfs2_tune *gt = &sdp->sd_tune;
  902. s64 value;
  903. unsigned int num, den;
  904. int do_sync = 1;
  905. if (!qd->qd_qb.qb_limit)
  906. return 0;
  907. spin_lock(&qd_lock);
  908. value = qd->qd_change;
  909. spin_unlock(&qd_lock);
  910. spin_lock(&gt->gt_spin);
  911. num = gt->gt_quota_scale_num;
  912. den = gt->gt_quota_scale_den;
  913. spin_unlock(&gt->gt_spin);
  914. if (value < 0)
  915. do_sync = 0;
  916. else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
  917. (s64)be64_to_cpu(qd->qd_qb.qb_limit))
  918. do_sync = 0;
  919. else {
  920. value *= gfs2_jindex_size(sdp) * num;
  921. value = div_s64(value, den);
  922. value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
  923. if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
  924. do_sync = 0;
  925. }
  926. return do_sync;
  927. }
  928. void gfs2_quota_unlock(struct gfs2_inode *ip)
  929. {
  930. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  931. struct gfs2_quota_data *qda[4];
  932. unsigned int count = 0;
  933. u32 x;
  934. int found;
  935. if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
  936. return;
  937. for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
  938. struct gfs2_quota_data *qd;
  939. int sync;
  940. qd = ip->i_qadata->qa_qd[x];
  941. sync = need_sync(qd);
  942. gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]);
  943. if (!sync)
  944. continue;
  945. spin_lock(&qd_lock);
  946. found = qd_check_sync(sdp, qd, NULL);
  947. spin_unlock(&qd_lock);
  948. if (!found)
  949. continue;
  950. gfs2_assert_warn(sdp, qd->qd_change_sync);
  951. if (bh_get(qd)) {
  952. clear_bit(QDF_LOCKED, &qd->qd_flags);
  953. slot_put(qd);
  954. qd_put(qd);
  955. continue;
  956. }
  957. qda[count++] = qd;
  958. }
  959. if (count) {
  960. do_sync(count, qda);
  961. for (x = 0; x < count; x++)
  962. qd_unlock(qda[x]);
  963. }
  964. gfs2_quota_unhold(ip);
  965. }
  966. #define MAX_LINE 256
  967. static int print_message(struct gfs2_quota_data *qd, char *type)
  968. {
  969. struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
  970. fs_info(sdp, "quota %s for %s %u\n",
  971. type,
  972. (qd->qd_id.type == USRQUOTA) ? "user" : "group",
  973. from_kqid(&init_user_ns, qd->qd_id));
  974. return 0;
  975. }
  976. /**
  977. * gfs2_quota_check - check if allocating new blocks will exceed quota
  978. * @ip: The inode for which this check is being performed
  979. * @uid: The uid to check against
  980. * @gid: The gid to check against
  981. * @ap: The allocation parameters. ap->target contains the requested
  982. * blocks. ap->min_target, if set, contains the minimum blks
  983. * requested.
  984. *
  985. * Returns: 0 on success.
  986. * min_req = ap->min_target ? ap->min_target : ap->target;
  987. * quota must allow at least min_req blks for success and
  988. * ap->allowed is set to the number of blocks allowed
  989. *
  990. * -EDQUOT otherwise, quota violation. ap->allowed is set to number
  991. * of blocks available.
  992. */
  993. int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
  994. struct gfs2_alloc_parms *ap)
  995. {
  996. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  997. struct gfs2_quota_data *qd;
  998. s64 value, warn, limit;
  999. u32 x;
  1000. int error = 0;
  1001. ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
  1002. if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
  1003. return 0;
  1004. for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
  1005. qd = ip->i_qadata->qa_qd[x];
  1006. if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
  1007. qid_eq(qd->qd_id, make_kqid_gid(gid))))
  1008. continue;
  1009. warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
  1010. limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
  1011. value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
  1012. spin_lock(&qd_lock);
  1013. value += qd->qd_change;
  1014. spin_unlock(&qd_lock);
  1015. if (limit > 0 && (limit - value) < ap->allowed)
  1016. ap->allowed = limit - value;
  1017. /* If we can't meet the target */
  1018. if (limit && limit < (value + (s64)ap->target)) {
  1019. /* If no min_target specified or we don't meet
  1020. * min_target, return -EDQUOT */
  1021. if (!ap->min_target || ap->min_target > ap->allowed) {
  1022. if (!test_and_set_bit(QDF_QMSG_QUIET,
  1023. &qd->qd_flags)) {
  1024. print_message(qd, "exceeded");
  1025. quota_send_warning(qd->qd_id,
  1026. sdp->sd_vfs->s_dev,
  1027. QUOTA_NL_BHARDWARN);
  1028. }
  1029. error = -EDQUOT;
  1030. break;
  1031. }
  1032. } else if (warn && warn < value &&
  1033. time_after_eq(jiffies, qd->qd_last_warn +
  1034. gfs2_tune_get(sdp, gt_quota_warn_period)
  1035. * HZ)) {
  1036. quota_send_warning(qd->qd_id,
  1037. sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
  1038. error = print_message(qd, "warning");
  1039. qd->qd_last_warn = jiffies;
  1040. }
  1041. }
  1042. return error;
  1043. }
  1044. void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
  1045. kuid_t uid, kgid_t gid)
  1046. {
  1047. struct gfs2_quota_data *qd;
  1048. u32 x;
  1049. struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
  1050. if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
  1051. gfs2_assert_warn(sdp, change))
  1052. return;
  1053. if (ip->i_diskflags & GFS2_DIF_SYSTEM)
  1054. return;
  1055. if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
  1056. ip->i_qadata->qa_ref > 0))
  1057. return;
  1058. for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
  1059. qd = ip->i_qadata->qa_qd[x];
  1060. if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
  1061. qid_eq(qd->qd_id, make_kqid_gid(gid))) {
  1062. do_qc(qd, change);
  1063. }
  1064. }
  1065. }
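/*
 * gfs2_quota_sync() drains dirty quota_data entries in batches of at most
 * PAGE_SIZE / sizeof(struct gfs2_holder): qd_fish() picks entries not yet
 * synced in the current sd_quota_sync_gen generation, do_sync() writes them
 * to the quota file, and qd_sync_gen is then updated so the same entries are
 * skipped for the rest of this pass.
 */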
  1066. int gfs2_quota_sync(struct super_block *sb, int type)
  1067. {
  1068. struct gfs2_sbd *sdp = sb->s_fs_info;
  1069. struct gfs2_quota_data **qda;
  1070. unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
  1071. unsigned int num_qd;
  1072. unsigned int x;
  1073. int error = 0;
  1074. qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
  1075. if (!qda)
  1076. return -ENOMEM;
  1077. mutex_lock(&sdp->sd_quota_sync_mutex);
  1078. sdp->sd_quota_sync_gen++;
  1079. do {
  1080. num_qd = 0;
  1081. for (;;) {
  1082. error = qd_fish(sdp, qda + num_qd);
  1083. if (error || !qda[num_qd])
  1084. break;
  1085. if (++num_qd == max_qd)
  1086. break;
  1087. }
  1088. if (num_qd) {
  1089. if (!error)
  1090. error = do_sync(num_qd, qda);
  1091. if (!error)
  1092. for (x = 0; x < num_qd; x++)
  1093. qda[x]->qd_sync_gen =
  1094. sdp->sd_quota_sync_gen;
  1095. for (x = 0; x < num_qd; x++)
  1096. qd_unlock(qda[x]);
  1097. }
  1098. } while (!error && num_qd == max_qd);
  1099. mutex_unlock(&sdp->sd_quota_sync_mutex);
  1100. kfree(qda);
  1101. return error;
  1102. }
  1103. int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
  1104. {
  1105. struct gfs2_quota_data *qd;
  1106. struct gfs2_holder q_gh;
  1107. int error;
  1108. error = qd_get(sdp, qid, &qd);
  1109. if (error)
  1110. return error;
  1111. error = do_glock(qd, FORCE, &q_gh);
  1112. if (!error)
  1113. gfs2_glock_dq_uninit(&q_gh);
  1114. qd_put(qd);
  1115. return error;
  1116. }
  1117. int gfs2_quota_init(struct gfs2_sbd *sdp)
  1118. {
  1119. struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
  1120. u64 size = i_size_read(sdp->sd_qc_inode);
  1121. unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
  1122. unsigned int x, slot = 0;
  1123. unsigned int found = 0;
  1124. unsigned int hash;
  1125. unsigned int bm_size;
  1126. u64 dblock;
  1127. u32 extlen = 0;
  1128. int error;
  1129. if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
  1130. return -EIO;
  1131. sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
  1132. bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
  1133. bm_size *= sizeof(unsigned long);
  1134. error = -ENOMEM;
  1135. sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
  1136. if (sdp->sd_quota_bitmap == NULL)
  1137. sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
  1138. __GFP_ZERO);
  1139. if (!sdp->sd_quota_bitmap)
  1140. return error;
  1141. for (x = 0; x < blocks; x++) {
  1142. struct buffer_head *bh;
  1143. const struct gfs2_quota_change *qc;
  1144. unsigned int y;
  1145. if (!extlen) {
  1146. int new = 0;
  1147. error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
  1148. if (error)
  1149. goto fail;
  1150. }
  1151. error = -EIO;
  1152. bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
  1153. if (!bh)
  1154. goto fail;
  1155. if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
  1156. brelse(bh);
  1157. goto fail;
  1158. }
  1159. qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
  1160. for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
  1161. y++, slot++) {
  1162. struct gfs2_quota_data *qd;
  1163. s64 qc_change = be64_to_cpu(qc->qc_change);
  1164. u32 qc_flags = be32_to_cpu(qc->qc_flags);
  1165. enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
  1166. USRQUOTA : GRPQUOTA;
  1167. struct kqid qc_id = make_kqid(&init_user_ns, qtype,
  1168. be32_to_cpu(qc->qc_id));
  1169. qc++;
  1170. if (!qc_change)
  1171. continue;
  1172. hash = gfs2_qd_hash(sdp, qc_id);
  1173. qd = qd_alloc(hash, sdp, qc_id);
  1174. if (qd == NULL) {
  1175. brelse(bh);
  1176. goto fail;
  1177. }
  1178. set_bit(QDF_CHANGE, &qd->qd_flags);
  1179. qd->qd_change = qc_change;
  1180. qd->qd_slot = slot;
  1181. qd->qd_slot_count = 1;
  1182. spin_lock(&qd_lock);
  1183. BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
  1184. list_add(&qd->qd_list, &sdp->sd_quota_list);
  1185. atomic_inc(&sdp->sd_quota_count);
  1186. spin_unlock(&qd_lock);
  1187. spin_lock_bucket(hash);
  1188. hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
  1189. spin_unlock_bucket(hash);
  1190. found++;
  1191. }
  1192. brelse(bh);
  1193. dblock++;
  1194. extlen--;
  1195. }
  1196. if (found)
  1197. fs_info(sdp, "found %u quota changes\n", found);
  1198. return 0;
  1199. fail:
  1200. gfs2_quota_cleanup(sdp);
  1201. return error;
  1202. }
  1203. void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
  1204. {
  1205. struct list_head *head = &sdp->sd_quota_list;
  1206. struct gfs2_quota_data *qd;
  1207. spin_lock(&qd_lock);
  1208. while (!list_empty(head)) {
  1209. qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
  1210. list_del(&qd->qd_list);
  1211. /* Also remove if this qd exists in the reclaim list */
  1212. list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
  1213. atomic_dec(&sdp->sd_quota_count);
  1214. spin_unlock(&qd_lock);
  1215. spin_lock_bucket(qd->qd_hash);
  1216. hlist_bl_del_rcu(&qd->qd_hlist);
  1217. spin_unlock_bucket(qd->qd_hash);
  1218. gfs2_assert_warn(sdp, !qd->qd_change);
  1219. gfs2_assert_warn(sdp, !qd->qd_slot_count);
  1220. gfs2_assert_warn(sdp, !qd->qd_bh_count);
  1221. gfs2_glock_put(qd->qd_gl);
  1222. call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
  1223. spin_lock(&qd_lock);
  1224. }
  1225. spin_unlock(&qd_lock);
  1226. gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
  1227. kvfree(sdp->sd_quota_bitmap);
  1228. sdp->sd_quota_bitmap = NULL;
  1229. }
  1230. static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
  1231. {
  1232. if (error == 0 || error == -EROFS)
  1233. return;
  1234. if (!gfs2_withdrawn(sdp)) {
  1235. if (!cmpxchg(&sdp->sd_log_error, 0, error))
  1236. fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
  1237. wake_up(&sdp->sd_logd_waitq);
  1238. }
  1239. }
  1240. static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
  1241. int (*fxn)(struct super_block *sb, int type),
  1242. unsigned long t, unsigned long *timeo,
  1243. unsigned int *new_timeo)
  1244. {
  1245. if (t >= *timeo) {
  1246. int error = fxn(sdp->sd_vfs, 0);
  1247. quotad_error(sdp, msg, error);
  1248. *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
  1249. } else {
  1250. *timeo -= t;
  1251. }
  1252. }
  1253. static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
  1254. {
  1255. struct gfs2_inode *ip;
  1256. while(1) {
  1257. ip = NULL;
  1258. spin_lock(&sdp->sd_trunc_lock);
  1259. if (!list_empty(&sdp->sd_trunc_list)) {
  1260. ip = list_first_entry(&sdp->sd_trunc_list,
  1261. struct gfs2_inode, i_trunc_list);
  1262. list_del_init(&ip->i_trunc_list);
  1263. }
  1264. spin_unlock(&sdp->sd_trunc_lock);
  1265. if (ip == NULL)
  1266. return;
  1267. gfs2_glock_finish_truncate(ip);
  1268. }
  1269. }
  1270. void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
  1271. if (!sdp->sd_statfs_force_sync) {
  1272. sdp->sd_statfs_force_sync = 1;
  1273. wake_up(&sdp->sd_quota_wait);
  1274. }
  1275. }
  1276. /**
  1277. * gfs2_quotad - Write cached quota changes into the quota file
  1278. * @sdp: Pointer to GFS2 superblock
  1279. *
  1280. */
  1281. int gfs2_quotad(void *data)
  1282. {
  1283. struct gfs2_sbd *sdp = data;
  1284. struct gfs2_tune *tune = &sdp->sd_tune;
  1285. unsigned long statfs_timeo = 0;
  1286. unsigned long quotad_timeo = 0;
  1287. unsigned long t = 0;
  1288. DEFINE_WAIT(wait);
  1289. int empty;
  1290. while (!kthread_should_stop()) {
  1291. if (gfs2_withdrawn(sdp))
  1292. goto bypass;
  1293. /* Update the master statfs file */
  1294. if (sdp->sd_statfs_force_sync) {
  1295. int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
  1296. quotad_error(sdp, "statfs", error);
  1297. statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
  1298. }
  1299. else
  1300. quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
  1301. &statfs_timeo,
  1302. &tune->gt_statfs_quantum);
  1303. /* Update quota file */
  1304. quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
  1305. &quotad_timeo, &tune->gt_quota_quantum);
  1306. /* Check for & recover partially truncated inodes */
  1307. quotad_check_trunc_list(sdp);
  1308. try_to_freeze();
  1309. bypass:
  1310. t = min(quotad_timeo, statfs_timeo);
  1311. prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
  1312. spin_lock(&sdp->sd_trunc_lock);
  1313. empty = list_empty(&sdp->sd_trunc_list);
  1314. spin_unlock(&sdp->sd_trunc_lock);
  1315. if (empty && !sdp->sd_statfs_force_sync)
  1316. t -= schedule_timeout(t);
  1317. else
  1318. t = 0;
  1319. finish_wait(&sdp->sd_quota_wait, &wait);
  1320. }
  1321. return 0;
  1322. }
  1323. static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
  1324. {
  1325. struct gfs2_sbd *sdp = sb->s_fs_info;
  1326. memset(state, 0, sizeof(*state));
  1327. switch (sdp->sd_args.ar_quota) {
  1328. case GFS2_QUOTA_ON:
  1329. state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
  1330. state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
  1331. fallthrough;
  1332. case GFS2_QUOTA_ACCOUNT:
  1333. state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
  1334. QCI_SYSFILE;
  1335. state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
  1336. QCI_SYSFILE;
  1337. break;
  1338. case GFS2_QUOTA_OFF:
  1339. break;
  1340. }
  1341. if (sdp->sd_quota_inode) {
  1342. state->s_state[USRQUOTA].ino =
  1343. GFS2_I(sdp->sd_quota_inode)->i_no_addr;
  1344. state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
  1345. }
  1346. state->s_state[USRQUOTA].nextents = 1; /* unsupported */
  1347. state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
  1348. state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
  1349. return 0;
  1350. }
  1351. static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
  1352. struct qc_dqblk *fdq)
  1353. {
  1354. struct gfs2_sbd *sdp = sb->s_fs_info;
  1355. struct gfs2_quota_lvb *qlvb;
  1356. struct gfs2_quota_data *qd;
  1357. struct gfs2_holder q_gh;
  1358. int error;
  1359. memset(fdq, 0, sizeof(*fdq));
  1360. if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
  1361. return -ESRCH; /* Crazy XFS error code */
  1362. if ((qid.type != USRQUOTA) &&
  1363. (qid.type != GRPQUOTA))
  1364. return -EINVAL;
  1365. error = qd_get(sdp, qid, &qd);
  1366. if (error)
  1367. return error;
  1368. error = do_glock(qd, FORCE, &q_gh);
  1369. if (error)
  1370. goto out;
  1371. qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
  1372. fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
  1373. fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
  1374. fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
  1375. gfs2_glock_dq_uninit(&q_gh);
  1376. out:
  1377. qd_put(qd);
  1378. return error;
  1379. }
  1380. /* GFS2 only supports a subset of the XFS fields */
  1381. #define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
  1382. static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
  1383. struct qc_dqblk *fdq)
  1384. {
  1385. struct gfs2_sbd *sdp = sb->s_fs_info;
  1386. struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
  1387. struct gfs2_quota_data *qd;
  1388. struct gfs2_holder q_gh, i_gh;
  1389. unsigned int data_blocks, ind_blocks;
  1390. unsigned int blocks = 0;
  1391. int alloc_required;
  1392. loff_t offset;
  1393. int error;
  1394. if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
  1395. return -ESRCH; /* Crazy XFS error code */
  1396. if ((qid.type != USRQUOTA) &&
  1397. (qid.type != GRPQUOTA))
  1398. return -EINVAL;
  1399. if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
  1400. return -EINVAL;
  1401. error = qd_get(sdp, qid, &qd);
  1402. if (error)
  1403. return error;
  1404. error = gfs2_qa_get(ip);
  1405. if (error)
  1406. goto out_put;
  1407. inode_lock(&ip->i_inode);
  1408. error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
  1409. if (error)
  1410. goto out_unlockput;
  1411. error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
  1412. if (error)
  1413. goto out_q;
  1414. /* Check for existing entry, if none then alloc new blocks */
  1415. error = update_qd(sdp, qd);
  1416. if (error)
  1417. goto out_i;
  1418. /* If nothing has changed, this is a no-op */
  1419. if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
  1420. ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
  1421. fdq->d_fieldmask ^= QC_SPC_SOFT;
  1422. if ((fdq->d_fieldmask & QC_SPC_HARD) &&
  1423. ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
  1424. fdq->d_fieldmask ^= QC_SPC_HARD;
  1425. if ((fdq->d_fieldmask & QC_SPACE) &&
  1426. ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
  1427. fdq->d_fieldmask ^= QC_SPACE;
  1428. if (fdq->d_fieldmask == 0)
  1429. goto out_i;
  1430. offset = qd2offset(qd);
  1431. alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
  1432. if (gfs2_is_stuffed(ip))
  1433. alloc_required = 1;
  1434. if (alloc_required) {
  1435. struct gfs2_alloc_parms ap = { .aflags = 0, };
  1436. gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
  1437. &data_blocks, &ind_blocks);
  1438. blocks = 1 + data_blocks + ind_blocks;
  1439. ap.target = blocks;
  1440. error = gfs2_inplace_reserve(ip, &ap);
  1441. if (error)
  1442. goto out_i;
  1443. blocks += gfs2_rg_blocks(ip, blocks);
  1444. }
  1445. /* Some quotas span block boundaries and can update two blocks,
  1446. adding an extra block to the transaction to handle such quotas */
  1447. error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
  1448. if (error)
  1449. goto out_release;
  1450. /* Apply changes */
  1451. error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
  1452. if (!error)
  1453. clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
  1454. gfs2_trans_end(sdp);
  1455. out_release:
  1456. if (alloc_required)
  1457. gfs2_inplace_release(ip);
  1458. out_i:
  1459. gfs2_glock_dq_uninit(&i_gh);
  1460. out_q:
  1461. gfs2_glock_dq_uninit(&q_gh);
  1462. out_unlockput:
  1463. gfs2_qa_put(ip);
  1464. inode_unlock(&ip->i_inode);
  1465. out_put:
  1466. qd_put(qd);
  1467. return error;
  1468. }
  1469. const struct quotactl_ops gfs2_quotactl_ops = {
  1470. .quota_sync = gfs2_quota_sync,
  1471. .get_state = gfs2_quota_get_state,
  1472. .get_dqblk = gfs2_get_dqblk,
  1473. .set_dqblk = gfs2_set_dqblk,
  1474. };
  1475. void __init gfs2_quota_hash_init(void)
  1476. {
  1477. unsigned i;
  1478. for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
  1479. INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
  1480. }