glock.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};
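
/*
 * glock_wake_function - wake a waiter only if it waits for this lock name
 *
 * Used as the wake function for entries on the glock wait table, so that a
 * wake-up keyed on one lock name does not wake waiters for other glocks
 * that happen to hash to the same wait queue head.
 */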
static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{
	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
}
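
/*
 * gfs2_glock_dealloc - free a glock after its RCU grace period has elapsed
 * @rcu: the rcu_head embedded in the glock
 *
 * Called via call_rcu() from gfs2_glock_free(). Frees the lock value block
 * (if any) and returns the glock to the appropriate slab cache.
 */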
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	kfree(gl->gl_lksb.sb_lvbptr);
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * glock_blocked_by_withdraw - determine if we can still use a glock
 * @gl: the glock
 *
 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 * when we're withdrawn. For example, to maintain metadata integrity, we should
 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
 * iopen or the transaction glocks may be safely used because none of their
 * metadata goes through the journal. So in general, we should disallow all
 * glocks that are journaled, and allow all the others. One exception is:
 * we need to allow our active journal to be promoted and demoted so others
 * may recover it and we can reacquire it when they're done.
 */
static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (likely(!gfs2_withdrawn(sdp)))
		return false;
	if (gl->gl_ops->go_flags & GLOF_NONDISK)
		return false;
	if (!sdp->sd_jdesc ||
	    gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
		return false;
	return true;
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	smp_mb();
	wake_up_glock(gl);
	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */
static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}
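
/*
 * gfs2_glock_add_to_lru - move a glock to the tail of the global LRU list
 * @gl: the glock
 *
 * Only glock types with GLOF_LRU participate in LRU-based reclaim. The glock
 * is (re)added at the tail, making it the last candidate for eviction.
 */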
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);

	list_del(&gl->gl_lru);
	list_add_tail(&gl->gl_lru, &lru_list);

	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
		set_bit(GLF_LRU, &gl->gl_flags);
		atomic_inc(&lru_count);
	}

	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!(gl->gl_ops->go_flags & GLOF_LRU))
		return;

	spin_lock(&lru_lock);
	if (test_bit(GLF_LRU, &gl->gl_flags)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/*
 * Enqueue the glock on the work queue. Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above. The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}
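
/*
 * __gfs2_glock_put - final teardown of a glock whose last reference is gone
 * @gl: the glock
 *
 * Called with the lockref spinlock held; marks the lockref dead, drops the
 * glock from the LRU, truncates any associated address space, and finally
 * hands the lock back to the lock manager via lm_put_lock.
 */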
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	if (mapping) {
		truncate_inode_pages_final(mapping);
		if (!gfs2_withdrawn(sdp))
			GLOCK_BUG_ON(gl, mapping->nrpages ||
				     mapping->nrexceptional);
	}
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */
void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);

	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
	if (gh->gh_flags & GL_ASYNC) {
		struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

		wake_up(&sdp->sd_async_glock_wait);
	}
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or zero to fail only queued "try" locks
 *
 */
static void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */
static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}
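
/*
 * gfs2_set_demote - flag a glock as needing to be demoted
 * @gl: the glock
 *
 * Sets GLF_DEMOTE and wakes up anybody sleeping in gfs2_glock_async_wait(),
 * since a pending demote can change the outcome of an async request.
 */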
static void gfs2_set_demote(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	set_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb();
	wake_up(&sdp->sd_async_glock_wait);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */
static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
			       gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}
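
/*
 * is_system_glock - check whether a glock protects a core system inode
 * @gl: the glock
 *
 * Currently only the statfs inode glock is treated as a system glock; such
 * glocks must still be granted after a withdraw so that daemons like
 * gfs2_quotad can exit cleanly (see the comment in do_xmote below).
 */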
static bool is_system_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

	if (gl == m_ip->i_gl)
		return true;
	return false;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
	    gh && !(gh->gh_flags & LM_FLAG_NOEXP))
		return;
	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		/*
		 * If another process is already doing the invalidate, let that
		 * finish first. The glock state machine will get back to this
		 * holder again later.
		 */
		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
				     &gl->gl_flags))
			return;
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync) {
		ret = glops->go_sync(gl);
		/* If we had a problem syncing (due to io errors or whatever),
		 * we should not invalidate the metadata or tell dlm to
		 * release the glock to other nodes.
		 */
		if (ret) {
			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
				fs_err(sdp, "Error %d syncing glock \n", ret);
				gfs2_dump_glock(NULL, gl, true);
			}
			goto skip_inval;
		}
	}
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
		/*
		 * The call to go_sync should have cleared out the ail list.
		 * If there are still items, we have a problem. We ought to
		 * withdraw, but we can't because the withdraw code also uses
		 * glocks. Warn about the error, dump the glock, then fall
		 * through and wait for logd to do the withdraw for us.
		 */
		if ((atomic_read(&gl->gl_ail_count) != 0) &&
		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
			gfs2_glock_assert_warn(gl,
					       !atomic_read(&gl->gl_ail_count));
			gfs2_dump_glock(NULL, gl, true);
		}
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
	}

skip_inval:
	gfs2_glock_hold(gl);
	/*
	 * Check for an error encountered since we called go_sync and go_inval.
	 * If so, we can't withdraw from the glock code because the withdraw
	 * code itself uses glocks (see function signal_our_withdraw) to
	 * change the mount to read-only. Most importantly, we must not call
	 * dlm to unlock the glock until the journal is in a known good state
	 * (after journal replay) otherwise other nodes may use the object
	 * (rgrp or dinode) and then later, journal replay will corrupt the
	 * file system. The best we can do here is wait for the logd daemon
	 * to see sd_log_error and withdraw, and in the meantime, requeue the
	 * work for later.
	 *
	 * We make a special exception for some system glocks, such as the
	 * system statfs inode glock, which needs to be granted before the
	 * gfs2_quotad daemon can exit, and that exit needs to finish before
	 * we can unmount the withdrawn file system.
	 *
	 * However, if we're just unlocking the lock (say, for unmount, when
	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
	 * then it's okay to tell dlm to unlock it.
	 */
	if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
		gfs2_withdraw_delayed(sdp);
	if (glock_blocked_by_withdraw(gl) &&
	    (target != LM_ST_UNLOCKED ||
	     test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) {
		if (!is_system_glock(gl)) {
			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
			goto out;
		} else {
			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		}
	}

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
		    target == LM_ST_UNLOCKED &&
		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
			finish_xmote(gl, target);
			gfs2_glock_queue_work(gl, 0);
		} else if (ret) {
			fs_err(sdp, "lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		gfs2_glock_queue_work(gl, 0);
	}
out:
	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */
static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */
static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	__gfs2_glock_queue_work(gl, 0);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}
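
/*
 * gfs2_inode_remember_delete - record a deleted inode's generation in the LVB
 * @gl: the iopen glock
 * @generation: the generation number of the inode that has been deleted
 *
 * The lock value block of the iopen glock carries the generation number of
 * the most recently deleted inode at this block; gfs2_inode_already_deleted()
 * below checks it so that nodes can avoid re-deleting the same inode.
 */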
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
{
	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

	if (ri->ri_magic == 0)
		ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
	if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
		ri->ri_generation_deleted = cpu_to_be64(generation);
}

bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
{
	struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;

	if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
		return false;
	return generation <= be64_to_cpu(ri->ri_generation_deleted);
}
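
/*
 * gfs2_glock_poke - try to briefly acquire and release a glock
 * @gl: the glock
 *
 * Takes the glock in LM_ST_SHARED with LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP
 * and drops it again immediately if that succeeds. Used by gfs2_try_evict()
 * below to nudge the inode glock after a local eviction.
 */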
static void gfs2_glock_poke(struct gfs2_glock *gl)
{
	int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh);
	error = gfs2_glock_nq(&gh);
	if (!error)
		gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
}

static bool gfs2_try_evict(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	bool evicted = false;

	/*
	 * If there is contention on the iopen glock and we have an inode, try
	 * to grab and release the inode so that it can be evicted. This will
	 * allow the remote node to go ahead and delete the inode without us
	 * having to do it, which will avoid rgrp glock thrashing.
	 *
	 * The remote node is likely still holding the corresponding inode
	 * glock, so it will run before we get to verify that the delete has
	 * happened below.
	 */
	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip && !igrab(&ip->i_inode))
		ip = NULL;
	spin_unlock(&gl->gl_lockref.lock);
	if (ip) {
		struct gfs2_glock *inode_gl = NULL;

		gl->gl_no_formal_ino = ip->i_no_formal_ino;
		set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
		d_prune_aliases(&ip->i_inode);
		iput(&ip->i_inode);

		/* If the inode was evicted, gl->gl_object will now be NULL. */
		spin_lock(&gl->gl_lockref.lock);
		ip = gl->gl_object;
		if (ip) {
			inode_gl = ip->i_gl;
			lockref_get(&inode_gl->gl_lockref);
			clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
		}
		spin_unlock(&gl->gl_lockref.lock);
		if (inode_gl) {
			gfs2_glock_poke(inode_gl);
			gfs2_glock_put(inode_gl);
		}
		evicted = !ip;
	}
	return evicted;
}
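
/*
 * delete_work_func - deferred processing of an iopen glock delete request
 * @work: the gl_delete work embedded in the iopen glock
 *
 * Either gives up the inode so the remote node can delete it (when the iopen
 * glock is being demoted), or looks up the unlinked inode and prunes it so
 * that the delete can be carried out locally.
 */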
static void delete_work_func(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);

	/* If someone's using this glock to create a new dinode, the block must
	   have been freed by another node, then re-used, in which case our
	   iopen callback is too late after the fact. Ignore it. */
	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
		goto out;

	if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
		/*
		 * If we can evict the inode, give the remote node trying to
		 * delete the inode some time before verifying that the delete
		 * has happened. Otherwise, if we cause contention on the inode glock
		 * immediately, the remote node will think that we still have
		 * the inode in use, and so it will give up waiting.
		 *
		 * If we can't evict the inode, signal to the remote node that
		 * the inode is still in use. We'll later try to delete the
		 * inode locally in gfs2_evict_inode.
		 *
		 * FIXME: We only need to verify that the remote node has
		 * deleted the inode because nodes before this remote delete
		 * rework won't cooperate. At a later time, when we no longer
		 * care about compatibility with such nodes, we can skip this
		 * step entirely.
		 */
		if (gfs2_try_evict(gl)) {
			if (gfs2_queue_delete_work(gl, 5 * HZ))
				return;
		}
		goto out;
	}

	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
				    GFS2_BLKST_UNLINKED);
	if (!IS_ERR_OR_NULL(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}
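
/*
 * glock_work_func - the main work function for a glock
 * @work: the gl_work embedded in the glock
 *
 * Handles a pending DLM reply (finish_xmote), turns a pending demote into an
 * actual demote once the minimum hold time has expired, runs the holder
 * queue, and finally drops the references that were passed in with the work.
 */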
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			gfs2_set_demote(gl);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}
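
/*
 * find_insert_glock - look up a glock in the hash table, optionally inserting
 * @name: the lock name to look up
 * @new: a new glock to insert if no matching glock exists (may be NULL)
 *
 * If a matching glock is found but is already dead, wait on the glock wait
 * table for it to disappear and retry. Returns the existing glock with a
 * reference held, NULL if @new was inserted (or nothing was found and @new
 * is NULL), or an ERR_PTR from the rhashtable insert.
 */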
static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
			&new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
			name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret = 0;

	gl = find_insert_glock(&name, NULL);
	if (gl) {
		*glp = gl;
		return 0;
	}
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = 0;
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
		INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

	tmp = find_insert_glock(&name, gl);
	if (!tmp) {
		*glp = gl;
		goto out;
	}
	if (IS_ERR(tmp)) {
		ret = PTR_ERR(tmp);
		goto out_free;
	}
	*glp = tmp;

out_free:
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);

out:
	return ret;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */
void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}
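
/*
 * gfs2_glock_update_hold_time - bump the minimum hold time of a glock
 * @gl: the glock
 * @start_time: the time (in jiffies) the lock request was submitted
 *
 * If it took more than a second for the request to be granted, lengthen the
 * minimum hold time so the glock is cached a little longer before it can be
 * demoted again.
 */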
static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */
int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gh->gh_error;
}
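
/*
 * glocks_pending - check whether any holder in the array is still waiting
 * @num_gh: the number of holders
 * @ghs: the holder array
 *
 * Returns 1 if at least one holder still has HIF_WAIT set (no response from
 * the DLM yet), 0 otherwise. Used as the wake-up condition in
 * gfs2_glock_async_wait().
 */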
static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;
	bool keep_waiting;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout))
		ret = -ESTALE; /* request timed out. */

	/*
	 * If dlm granted all our requests, we need to adjust the glock
	 * minimum hold time values according to how long we waited.
	 *
	 * If our request timed out, we need to repeatedly release any held
	 * glocks we acquired thus far to allow dlm to acquire the remaining
	 * glocks without deadlocking. We cannot currently cancel outstanding
	 * glock acquisitions.
	 *
	 * The HIF_WAIT bit tells us which requests still need a response from
	 * dlm.
	 *
	 * If dlm sent us any errors, we return the first error we find.
	 */
	keep_waiting = false;
	for (i = 0; i < num_gh; i++) {
		/* Skip holders we have already dequeued below. */
		if (!gfs2_holder_queued(&ghs[i]))
			continue;
		/* Skip holders with a pending DLM response. */
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
			keep_waiting = true;
			continue;
		}

		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
			if (ret == -ESTALE)
				gfs2_glock_dq(&ghs[i]);
			else
				gfs2_glock_update_hold_time(ghs[i].gh_gl,
							    start_time);
		}
		if (!ret)
			ret = ghs[i].gh_error;
	}

	if (keep_waiting)
		goto wait_for_dlm;

	/*
	 * At this point, we've either acquired all locks or released them all.
	 */
	return ret;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, defer the demote instead of setting it immediately
 * @remote: true if this is a demote request from a remote node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	if (delay)
		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
	else
		gfs2_set_demote(gl);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */
static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		GLOCK_BUG_ON(gl, true);

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
	fs_err(sdp, "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl, true);
	BUG();
}
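
/*
 * Example (sketch): the usual acquire/use/release pattern built on top of
 * gfs2_glock_nq() and gfs2_glock_dq(), using helpers from this file:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the object protected by the glock ...
 *		gfs2_glock_dq_uninit(&gh);
 *	}
 */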
  1247. /**
  1248. * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
  1249. * @gh: the holder structure
  1250. *
  1251. * if (gh->gh_flags & GL_ASYNC), this never returns an error
  1252. *
  1253. * Returns: 0, GLR_TRYFAILED, or errno on failure
  1254. */
  1255. int gfs2_glock_nq(struct gfs2_holder *gh)
  1256. {
  1257. struct gfs2_glock *gl = gh->gh_gl;
  1258. int error = 0;
  1259. if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
  1260. return -EIO;
  1261. if (test_bit(GLF_LRU, &gl->gl_flags))
  1262. gfs2_glock_remove_from_lru(gl);
  1263. spin_lock(&gl->gl_lockref.lock);
  1264. add_to_queue(gh);
  1265. if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
  1266. test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
  1267. set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
  1268. gl->gl_lockref.count++;
  1269. __gfs2_glock_queue_work(gl, 0);
  1270. }
  1271. run_queue(gl, 1);
  1272. spin_unlock(&gl->gl_lockref.lock);
  1273. if (!(gh->gh_flags & GL_ASYNC))
  1274. error = gfs2_glock_wait(gh);
  1275. return error;
  1276. }
  1277. /**
  1278. * gfs2_glock_poll - poll to see if an async request has been completed
  1279. * @gh: the holder
  1280. *
  1281. * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
  1282. */
  1283. int gfs2_glock_poll(struct gfs2_holder *gh)
  1284. {
  1285. return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
  1286. }
  1287. /**
  1288. * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
  1289. * @gh: the glock holder
  1290. *
  1291. */
  1292. void gfs2_glock_dq(struct gfs2_holder *gh)
  1293. {
  1294. struct gfs2_glock *gl = gh->gh_gl;
  1295. struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
  1296. unsigned delay = 0;
  1297. int fast_path = 0;
  1298. spin_lock(&gl->gl_lockref.lock);
  1299. /*
  1300. * If we're in the process of file system withdraw, we cannot just
  1301. * dequeue any glocks until our journal is recovered, lest we
  1302. * introduce file system corruption. We need two exceptions to this
  1303. * rule: We need to allow unlocking of nondisk glocks and the glock
  1304. * for our own journal that needs recovery.
  1305. */
  1306. if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
  1307. glock_blocked_by_withdraw(gl) &&
  1308. gh->gh_gl != sdp->sd_jinode_gl) {
  1309. sdp->sd_glock_dqs_held++;
  1310. spin_unlock(&gl->gl_lockref.lock);
  1311. might_sleep();
  1312. wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
  1313. TASK_UNINTERRUPTIBLE);
  1314. spin_lock(&gl->gl_lockref.lock);
  1315. }
  1316. if (gh->gh_flags & GL_NOCACHE)
  1317. handle_callback(gl, LM_ST_UNLOCKED, 0, false);
  1318. list_del_init(&gh->gh_list);
  1319. clear_bit(HIF_HOLDER, &gh->gh_iflags);
  1320. if (find_first_holder(gl) == NULL) {
  1321. if (list_empty(&gl->gl_holders) &&
  1322. !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
  1323. !test_bit(GLF_DEMOTE, &gl->gl_flags))
  1324. fast_path = 1;
  1325. }
  1326. if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
  1327. gfs2_glock_add_to_lru(gl);
  1328. trace_gfs2_glock_queue(gh, 0);
  1329. if (unlikely(!fast_path)) {
  1330. gl->gl_lockref.count++;
  1331. if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
  1332. !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
  1333. gl->gl_name.ln_type == LM_TYPE_INODE)
  1334. delay = gl->gl_hold_time;
  1335. __gfs2_glock_queue_work(gl, delay);
  1336. }
  1337. spin_unlock(&gl->gl_lockref.lock);
  1338. }
  1339. void gfs2_glock_dq_wait(struct gfs2_holder *gh)
  1340. {
  1341. struct gfs2_glock *gl = gh->gh_gl;
  1342. gfs2_glock_dq(gh);
  1343. might_sleep();
  1344. wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
  1345. }
  1346. /**
1347. * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
  1348. * @gh: the holder structure
  1349. *
  1350. */
  1351. void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
  1352. {
  1353. gfs2_glock_dq(gh);
  1354. gfs2_holder_uninit(gh);
  1355. }
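/*
 * Editorial sketch (not part of the original file): the usual synchronous
 * pairing of gfs2_glock_nq_init() (an inline in glock.h) with
 * gfs2_glock_dq_uninit() above.  "example_read_inode" and the protected
 * work are hypothetical.
 */
#if 0
static int example_read_inode(struct gfs2_inode *ip)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;
	/* ... the inode glock is now held in SH; read protected state ... */
	gfs2_glock_dq_uninit(&gh);
	return 0;
}
#endif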
  1356. /**
  1357. * gfs2_glock_nq_num - acquire a glock based on lock number
  1358. * @sdp: the filesystem
  1359. * @number: the lock number
  1360. * @glops: the glock operations for the type of glock
  1361. * @state: the state to acquire the glock in
  1362. * @flags: modifier flags for the acquisition
  1363. * @gh: the struct gfs2_holder
  1364. *
  1365. * Returns: errno
  1366. */
  1367. int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
  1368. const struct gfs2_glock_operations *glops,
  1369. unsigned int state, u16 flags, struct gfs2_holder *gh)
  1370. {
  1371. struct gfs2_glock *gl;
  1372. int error;
  1373. error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
  1374. if (!error) {
  1375. error = gfs2_glock_nq_init(gl, state, flags, gh);
  1376. gfs2_glock_put(gl);
  1377. }
  1378. return error;
  1379. }
  1380. /**
1381. * glock_compare - Compare two struct gfs2_holder pointers for sorting
1382. * @arg_a: the first holder
1383. * @arg_b: the second holder
  1384. *
  1385. */
  1386. static int glock_compare(const void *arg_a, const void *arg_b)
  1387. {
  1388. const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
  1389. const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
  1390. const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
  1391. const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
  1392. if (a->ln_number > b->ln_number)
  1393. return 1;
  1394. if (a->ln_number < b->ln_number)
  1395. return -1;
  1396. BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
  1397. return 0;
  1398. }
  1399. /**
1400. * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
  1401. * @num_gh: the number of structures
  1402. * @ghs: an array of struct gfs2_holder structures
  1403. *
  1404. * Returns: 0 on success (all glocks acquired),
  1405. * errno on failure (no glocks acquired)
  1406. */
  1407. static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
  1408. struct gfs2_holder **p)
  1409. {
  1410. unsigned int x;
  1411. int error = 0;
  1412. for (x = 0; x < num_gh; x++)
  1413. p[x] = &ghs[x];
  1414. sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
  1415. for (x = 0; x < num_gh; x++) {
  1416. p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
  1417. error = gfs2_glock_nq(p[x]);
  1418. if (error) {
  1419. while (x--)
  1420. gfs2_glock_dq(p[x]);
  1421. break;
  1422. }
  1423. }
  1424. return error;
  1425. }
  1426. /**
  1427. * gfs2_glock_nq_m - acquire multiple glocks
  1428. * @num_gh: the number of structures
  1429. * @ghs: an array of struct gfs2_holder structures
  1430. *
  1431. *
  1432. * Returns: 0 on success (all glocks acquired),
  1433. * errno on failure (no glocks acquired)
  1434. */
  1435. int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
  1436. {
  1437. struct gfs2_holder *tmp[4];
  1438. struct gfs2_holder **pph = tmp;
  1439. int error = 0;
  1440. switch(num_gh) {
  1441. case 0:
  1442. return 0;
  1443. case 1:
  1444. ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
  1445. return gfs2_glock_nq(ghs);
  1446. default:
  1447. if (num_gh <= 4)
  1448. break;
  1449. pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
  1450. GFP_NOFS);
  1451. if (!pph)
  1452. return -ENOMEM;
  1453. }
  1454. error = nq_m_sync(num_gh, ghs, pph);
  1455. if (pph != tmp)
  1456. kfree(pph);
  1457. return error;
  1458. }
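/*
 * Editorial sketch (not part of the original file): taking several glocks
 * at once with gfs2_glock_nq_m(), which sorts the holders by lock number
 * (via nq_m_sync() above) to avoid ABBA deadlocks.  The two-inode scenario
 * and the function name are hypothetical.
 */
#if 0
static int example_lock_two_inodes(struct gfs2_inode *a, struct gfs2_inode *b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(a->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(b->i_gl, LM_ST_EXCLUSIVE, 0, &ghs[1]);

	error = gfs2_glock_nq_m(2, ghs);	/* all or nothing */
	if (!error) {
		/* ... both inodes are locked EX here ... */
		gfs2_glock_dq_m(2, ghs);
	}
	gfs2_holder_uninit(&ghs[0]);
	gfs2_holder_uninit(&ghs[1]);
	return error;
}
#endif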
  1459. /**
  1460. * gfs2_glock_dq_m - release multiple glocks
  1461. * @num_gh: the number of structures
  1462. * @ghs: an array of struct gfs2_holder structures
  1463. *
  1464. */
  1465. void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
  1466. {
  1467. while (num_gh--)
  1468. gfs2_glock_dq(&ghs[num_gh]);
  1469. }
  1470. void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
  1471. {
  1472. unsigned long delay = 0;
  1473. unsigned long holdtime;
  1474. unsigned long now = jiffies;
  1475. gfs2_glock_hold(gl);
  1476. spin_lock(&gl->gl_lockref.lock);
  1477. holdtime = gl->gl_tchange + gl->gl_hold_time;
  1478. if (!list_empty(&gl->gl_holders) &&
  1479. gl->gl_name.ln_type == LM_TYPE_INODE) {
  1480. if (time_before(now, holdtime))
  1481. delay = holdtime - now;
  1482. if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
  1483. delay = gl->gl_hold_time;
  1484. }
  1485. handle_callback(gl, state, delay, true);
  1486. __gfs2_glock_queue_work(gl, delay);
  1487. spin_unlock(&gl->gl_lockref.lock);
  1488. }
  1489. /**
  1490. * gfs2_should_freeze - Figure out if glock should be frozen
  1491. * @gl: The glock in question
  1492. *
1493. * Glocks are not frozen if (a) the result of the dlm operation is
1494. * an error, (b) the locking operation was an unlock operation, or
1495. * (c) there is a "noexp" flagged request anywhere in the queue.
  1496. *
  1497. * Returns: 1 if freezing should occur, 0 otherwise
  1498. */
  1499. static int gfs2_should_freeze(const struct gfs2_glock *gl)
  1500. {
  1501. const struct gfs2_holder *gh;
  1502. if (gl->gl_reply & ~LM_OUT_ST_MASK)
  1503. return 0;
  1504. if (gl->gl_target == LM_ST_UNLOCKED)
  1505. return 0;
  1506. list_for_each_entry(gh, &gl->gl_holders, gh_list) {
  1507. if (test_bit(HIF_HOLDER, &gh->gh_iflags))
  1508. continue;
  1509. if (LM_FLAG_NOEXP & gh->gh_flags)
  1510. return 0;
  1511. }
  1512. return 1;
  1513. }
  1514. /**
  1515. * gfs2_glock_complete - Callback used by locking
  1516. * @gl: Pointer to the glock
  1517. * @ret: The return value from the dlm
  1518. *
1519. * The gl_reply field is protected by gl_lockref.lock so that it is ok
  1520. * to use a bitfield shared with other glock state fields.
  1521. */
  1522. void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
  1523. {
  1524. struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
  1525. spin_lock(&gl->gl_lockref.lock);
  1526. gl->gl_reply = ret;
  1527. if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
  1528. if (gfs2_should_freeze(gl)) {
  1529. set_bit(GLF_FROZEN, &gl->gl_flags);
  1530. spin_unlock(&gl->gl_lockref.lock);
  1531. return;
  1532. }
  1533. }
  1534. gl->gl_lockref.count++;
  1535. set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
  1536. __gfs2_glock_queue_work(gl, 0);
  1537. spin_unlock(&gl->gl_lockref.lock);
  1538. }
  1539. static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
  1540. {
  1541. struct gfs2_glock *gla, *glb;
  1542. gla = list_entry(a, struct gfs2_glock, gl_lru);
  1543. glb = list_entry(b, struct gfs2_glock, gl_lru);
  1544. if (gla->gl_name.ln_number > glb->gl_name.ln_number)
  1545. return 1;
  1546. if (gla->gl_name.ln_number < glb->gl_name.ln_number)
  1547. return -1;
  1548. return 0;
  1549. }
  1550. /**
  1551. * gfs2_dispose_glock_lru - Demote a list of glocks
  1552. * @list: The list to dispose of
  1553. *
1554. * Disposing of glocks may involve disk accesses, so we sort the
1555. * glocks by number (i.e. disk location of the inodes) so that any
1556. * such accesses are sent (mostly) in order.
  1557. *
  1558. * Must be called under the lru_lock, but may drop and retake this
  1559. * lock. While the lru_lock is dropped, entries may vanish from the
  1560. * list, but no new entries will appear on the list (since it is
  1561. * private)
  1562. */
  1563. static void gfs2_dispose_glock_lru(struct list_head *list)
  1564. __releases(&lru_lock)
  1565. __acquires(&lru_lock)
  1566. {
  1567. struct gfs2_glock *gl;
  1568. list_sort(NULL, list, glock_cmp);
  1569. while(!list_empty(list)) {
  1570. gl = list_first_entry(list, struct gfs2_glock, gl_lru);
  1571. list_del_init(&gl->gl_lru);
  1572. clear_bit(GLF_LRU, &gl->gl_flags);
  1573. if (!spin_trylock(&gl->gl_lockref.lock)) {
  1574. add_back_to_lru:
  1575. list_add(&gl->gl_lru, &lru_list);
  1576. set_bit(GLF_LRU, &gl->gl_flags);
  1577. atomic_inc(&lru_count);
  1578. continue;
  1579. }
  1580. if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
  1581. spin_unlock(&gl->gl_lockref.lock);
  1582. goto add_back_to_lru;
  1583. }
  1584. gl->gl_lockref.count++;
  1585. if (demote_ok(gl))
  1586. handle_callback(gl, LM_ST_UNLOCKED, 0, false);
  1587. WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
  1588. __gfs2_glock_queue_work(gl, 0);
  1589. spin_unlock(&gl->gl_lockref.lock);
  1590. cond_resched_lock(&lru_lock);
  1591. }
  1592. }
  1593. /**
  1594. * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
  1595. * @nr: The number of entries to scan
  1596. *
  1597. * This function selects the entries on the LRU which are able to
  1598. * be demoted, and then kicks off the process by calling
  1599. * gfs2_dispose_glock_lru() above.
  1600. */
  1601. static long gfs2_scan_glock_lru(int nr)
  1602. {
  1603. struct gfs2_glock *gl;
  1604. LIST_HEAD(skipped);
  1605. LIST_HEAD(dispose);
  1606. long freed = 0;
  1607. spin_lock(&lru_lock);
  1608. while ((nr-- >= 0) && !list_empty(&lru_list)) {
  1609. gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru);
  1610. /* Test for being demotable */
  1611. if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
  1612. list_move(&gl->gl_lru, &dispose);
  1613. atomic_dec(&lru_count);
  1614. freed++;
  1615. continue;
  1616. }
  1617. list_move(&gl->gl_lru, &skipped);
  1618. }
  1619. list_splice(&skipped, &lru_list);
  1620. if (!list_empty(&dispose))
  1621. gfs2_dispose_glock_lru(&dispose);
  1622. spin_unlock(&lru_lock);
  1623. return freed;
  1624. }
  1625. static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
  1626. struct shrink_control *sc)
  1627. {
  1628. if (!(sc->gfp_mask & __GFP_FS))
  1629. return SHRINK_STOP;
  1630. return gfs2_scan_glock_lru(sc->nr_to_scan);
  1631. }
  1632. static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
  1633. struct shrink_control *sc)
  1634. {
  1635. return vfs_pressure_ratio(atomic_read(&lru_count));
  1636. }
  1637. static struct shrinker glock_shrinker = {
  1638. .seeks = DEFAULT_SEEKS,
  1639. .count_objects = gfs2_glock_shrink_count,
  1640. .scan_objects = gfs2_glock_shrink_scan,
  1641. };
  1642. /**
1643. * glock_hash_walk - Call a function for each glock in a hash bucket
  1644. * @examiner: the function
  1645. * @sdp: the filesystem
  1646. *
1647. * Note that the function can be called multiple times on the same
1648. * object, so the user must ensure that the function can cope with
1649. * that.
  1650. */
  1651. static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
  1652. {
  1653. struct gfs2_glock *gl;
  1654. struct rhashtable_iter iter;
  1655. rhashtable_walk_enter(&gl_hash_table, &iter);
  1656. do {
  1657. rhashtable_walk_start(&iter);
  1658. while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
  1659. if (gl->gl_name.ln_sbd == sdp)
  1660. examiner(gl);
  1661. }
  1662. rhashtable_walk_stop(&iter);
  1663. } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
  1664. rhashtable_walk_exit(&iter);
  1665. }
  1666. bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
  1667. {
  1668. bool queued;
  1669. spin_lock(&gl->gl_lockref.lock);
  1670. queued = queue_delayed_work(gfs2_delete_workqueue,
  1671. &gl->gl_delete, delay);
  1672. if (queued)
  1673. set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
  1674. spin_unlock(&gl->gl_lockref.lock);
  1675. return queued;
  1676. }
  1677. void gfs2_cancel_delete_work(struct gfs2_glock *gl)
  1678. {
  1679. if (cancel_delayed_work(&gl->gl_delete)) {
  1680. clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
  1681. gfs2_glock_put(gl);
  1682. }
  1683. }
  1684. bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
  1685. {
  1686. return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
  1687. }
  1688. static void flush_delete_work(struct gfs2_glock *gl)
  1689. {
  1690. if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
  1691. if (cancel_delayed_work(&gl->gl_delete)) {
  1692. queue_delayed_work(gfs2_delete_workqueue,
  1693. &gl->gl_delete, 0);
  1694. }
  1695. }
  1696. }
  1697. void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
  1698. {
  1699. glock_hash_walk(flush_delete_work, sdp);
  1700. flush_workqueue(gfs2_delete_workqueue);
  1701. }
  1702. /**
  1703. * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  1704. * @gl: The glock to thaw
  1705. *
  1706. */
  1707. static void thaw_glock(struct gfs2_glock *gl)
  1708. {
  1709. if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
  1710. return;
  1711. if (!lockref_get_not_dead(&gl->gl_lockref))
  1712. return;
  1713. set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
  1714. gfs2_glock_queue_work(gl, 0);
  1715. }
  1716. /**
  1717. * clear_glock - look at a glock and see if we can free it from glock cache
  1718. * @gl: the glock to look at
  1719. *
  1720. */
  1721. static void clear_glock(struct gfs2_glock *gl)
  1722. {
  1723. gfs2_glock_remove_from_lru(gl);
  1724. spin_lock(&gl->gl_lockref.lock);
  1725. if (!__lockref_is_dead(&gl->gl_lockref)) {
  1726. gl->gl_lockref.count++;
  1727. if (gl->gl_state != LM_ST_UNLOCKED)
  1728. handle_callback(gl, LM_ST_UNLOCKED, 0, false);
  1729. __gfs2_glock_queue_work(gl, 0);
  1730. }
  1731. spin_unlock(&gl->gl_lockref.lock);
  1732. }
  1733. /**
  1734. * gfs2_glock_thaw - Thaw any frozen glocks
  1735. * @sdp: The super block
  1736. *
  1737. */
  1738. void gfs2_glock_thaw(struct gfs2_sbd *sdp)
  1739. {
  1740. glock_hash_walk(thaw_glock, sdp);
  1741. }
  1742. static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
  1743. {
  1744. spin_lock(&gl->gl_lockref.lock);
  1745. gfs2_dump_glock(seq, gl, fsid);
  1746. spin_unlock(&gl->gl_lockref.lock);
  1747. }
  1748. static void dump_glock_func(struct gfs2_glock *gl)
  1749. {
  1750. dump_glock(NULL, gl, true);
  1751. }
  1752. /**
  1753. * gfs2_gl_hash_clear - Empty out the glock hash table
  1754. * @sdp: the filesystem
  1756. *
  1757. * Called when unmounting the filesystem.
  1758. */
  1759. void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
  1760. {
  1761. set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
  1762. flush_workqueue(glock_workqueue);
  1763. glock_hash_walk(clear_glock, sdp);
  1764. flush_workqueue(glock_workqueue);
  1765. wait_event_timeout(sdp->sd_glock_wait,
  1766. atomic_read(&sdp->sd_glock_disposal) == 0,
  1767. HZ * 600);
  1768. glock_hash_walk(dump_glock_func, sdp);
  1769. }
  1770. void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
  1771. {
  1772. struct gfs2_glock *gl = ip->i_gl;
  1773. int ret;
  1774. ret = gfs2_truncatei_resume(ip);
  1775. gfs2_glock_assert_withdraw(gl, ret == 0);
  1776. spin_lock(&gl->gl_lockref.lock);
  1777. clear_bit(GLF_LOCK, &gl->gl_flags);
  1778. run_queue(gl, 1);
  1779. spin_unlock(&gl->gl_lockref.lock);
  1780. }
  1781. static const char *state2str(unsigned state)
  1782. {
  1783. switch(state) {
  1784. case LM_ST_UNLOCKED:
  1785. return "UN";
  1786. case LM_ST_SHARED:
  1787. return "SH";
  1788. case LM_ST_DEFERRED:
  1789. return "DF";
  1790. case LM_ST_EXCLUSIVE:
  1791. return "EX";
  1792. }
  1793. return "??";
  1794. }
  1795. static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
  1796. {
  1797. char *p = buf;
  1798. if (flags & LM_FLAG_TRY)
  1799. *p++ = 't';
  1800. if (flags & LM_FLAG_TRY_1CB)
  1801. *p++ = 'T';
  1802. if (flags & LM_FLAG_NOEXP)
  1803. *p++ = 'e';
  1804. if (flags & LM_FLAG_ANY)
  1805. *p++ = 'A';
  1806. if (flags & LM_FLAG_PRIORITY)
  1807. *p++ = 'p';
  1808. if (flags & GL_ASYNC)
  1809. *p++ = 'a';
  1810. if (flags & GL_EXACT)
  1811. *p++ = 'E';
  1812. if (flags & GL_NOCACHE)
  1813. *p++ = 'c';
  1814. if (test_bit(HIF_HOLDER, &iflags))
  1815. *p++ = 'H';
  1816. if (test_bit(HIF_WAIT, &iflags))
  1817. *p++ = 'W';
  1818. if (test_bit(HIF_FIRST, &iflags))
  1819. *p++ = 'F';
  1820. *p = 0;
  1821. return buf;
  1822. }
  1823. /**
  1824. * dump_holder - print information about a glock holder
  1825. * @seq: the seq_file struct
  1826. * @gh: the glock holder
  1827. * @fs_id_buf: pointer to file system id (if requested)
  1828. *
  1829. */
  1830. static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
  1831. const char *fs_id_buf)
  1832. {
  1833. struct task_struct *gh_owner = NULL;
  1834. char flags_buf[32];
  1835. rcu_read_lock();
  1836. if (gh->gh_owner_pid)
  1837. gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
  1838. gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
  1839. fs_id_buf, state2str(gh->gh_state),
  1840. hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
  1841. gh->gh_error,
  1842. gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
  1843. gh_owner ? gh_owner->comm : "(ended)",
  1844. (void *)gh->gh_ip);
  1845. rcu_read_unlock();
  1846. }
  1847. static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
  1848. {
  1849. const unsigned long *gflags = &gl->gl_flags;
  1850. char *p = buf;
  1851. if (test_bit(GLF_LOCK, gflags))
  1852. *p++ = 'l';
  1853. if (test_bit(GLF_DEMOTE, gflags))
  1854. *p++ = 'D';
  1855. if (test_bit(GLF_PENDING_DEMOTE, gflags))
  1856. *p++ = 'd';
  1857. if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
  1858. *p++ = 'p';
  1859. if (test_bit(GLF_DIRTY, gflags))
  1860. *p++ = 'y';
  1861. if (test_bit(GLF_LFLUSH, gflags))
  1862. *p++ = 'f';
  1863. if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
  1864. *p++ = 'i';
  1865. if (test_bit(GLF_REPLY_PENDING, gflags))
  1866. *p++ = 'r';
  1867. if (test_bit(GLF_INITIAL, gflags))
  1868. *p++ = 'I';
  1869. if (test_bit(GLF_FROZEN, gflags))
  1870. *p++ = 'F';
  1871. if (!list_empty(&gl->gl_holders))
  1872. *p++ = 'q';
  1873. if (test_bit(GLF_LRU, gflags))
  1874. *p++ = 'L';
  1875. if (gl->gl_object)
  1876. *p++ = 'o';
  1877. if (test_bit(GLF_BLOCKING, gflags))
  1878. *p++ = 'b';
  1879. if (test_bit(GLF_INODE_CREATING, gflags))
  1880. *p++ = 'c';
  1881. if (test_bit(GLF_PENDING_DELETE, gflags))
  1882. *p++ = 'P';
  1883. if (test_bit(GLF_FREEING, gflags))
  1884. *p++ = 'x';
  1885. *p = 0;
  1886. return buf;
  1887. }
  1888. /**
  1889. * gfs2_dump_glock - print information about a glock
  1890. * @seq: The seq_file struct
  1891. * @gl: the glock
  1892. * @fsid: If true, also dump the file system id
  1893. *
  1894. * The file format is as follows:
1895. * One line per object; capital letters are used to indicate objects:
1896. * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented;
1897. * other objects are indented by a single space and follow the glock to
1898. * which they are related. Fields are indicated by lower case letters
1899. * followed by a colon and the field value, except for strings, which are
1900. * in [] so that it's possible to see if they are composed of spaces, for
1901. * example. The fields are n = number (id of the object), f = flags,
  1902. * t = type, s = state, r = refcount, e = error, p = pid.
  1903. *
  1904. */
  1905. void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
  1906. {
  1907. const struct gfs2_glock_operations *glops = gl->gl_ops;
  1908. unsigned long long dtime;
  1909. const struct gfs2_holder *gh;
  1910. char gflags_buf[32];
  1911. struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
  1912. char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
  1913. unsigned long nrpages = 0;
  1914. if (gl->gl_ops->go_flags & GLOF_ASPACE) {
  1915. struct address_space *mapping = gfs2_glock2aspace(gl);
  1916. nrpages = mapping->nrpages;
  1917. }
  1918. memset(fs_id_buf, 0, sizeof(fs_id_buf));
  1919. if (fsid && sdp) /* safety precaution */
  1920. sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
  1921. dtime = jiffies - gl->gl_demote_time;
  1922. dtime *= 1000000/HZ; /* demote time in uSec */
  1923. if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
  1924. dtime = 0;
  1925. gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
  1926. "v:%d r:%d m:%ld p:%lu\n",
  1927. fs_id_buf, state2str(gl->gl_state),
  1928. gl->gl_name.ln_type,
  1929. (unsigned long long)gl->gl_name.ln_number,
  1930. gflags2str(gflags_buf, gl),
  1931. state2str(gl->gl_target),
  1932. state2str(gl->gl_demote_state), dtime,
  1933. atomic_read(&gl->gl_ail_count),
  1934. atomic_read(&gl->gl_revokes),
  1935. (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
  1936. list_for_each_entry(gh, &gl->gl_holders, gh_list)
  1937. dump_holder(seq, gh, fs_id_buf);
  1938. if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
  1939. glops->go_dump(seq, gl, fs_id_buf);
  1940. }
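/*
 * Editorial note: with the format strings above, a glock and one of its
 * holders are dumped roughly as follows (values are illustrative only):
 *
 *   G: s:EX n:2/23f3 f:lqo t:EX d:UN/0 a:0 v:0 r:4 m:200 p:12
 *    H: s:EX f:H e:0 p:1234 [rm] gfs2_inode_lookup+0x...
 */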
  1941. static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
  1942. {
  1943. struct gfs2_glock *gl = iter_ptr;
  1944. seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
  1945. gl->gl_name.ln_type,
  1946. (unsigned long long)gl->gl_name.ln_number,
  1947. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
  1948. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
  1949. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
  1950. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
  1951. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
  1952. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
  1953. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
  1954. (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
  1955. return 0;
  1956. }
  1957. static const char *gfs2_gltype[] = {
  1958. "type",
  1959. "reserved",
  1960. "nondisk",
  1961. "inode",
  1962. "rgrp",
  1963. "meta",
  1964. "iopen",
  1965. "flock",
  1966. "plock",
  1967. "quota",
  1968. "journal",
  1969. };
  1970. static const char *gfs2_stype[] = {
  1971. [GFS2_LKS_SRTT] = "srtt",
  1972. [GFS2_LKS_SRTTVAR] = "srttvar",
  1973. [GFS2_LKS_SRTTB] = "srttb",
  1974. [GFS2_LKS_SRTTVARB] = "srttvarb",
  1975. [GFS2_LKS_SIRT] = "sirt",
  1976. [GFS2_LKS_SIRTVAR] = "sirtvar",
  1977. [GFS2_LKS_DCOUNT] = "dlm",
  1978. [GFS2_LKS_QCOUNT] = "queue",
  1979. };
  1980. #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
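/*
 * Editorial note: gfs2_sbstats_seq_show() below decodes its position as
 * index = pos >> 3 (the glock type row from gfs2_gltype[]) and
 * subindex = pos & 0x07 (the statistic column from gfs2_stype[]); the
 * shift and mask by 8 work because ARRAY_SIZE(gfs2_stype) is 8.
 */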
  1981. static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
  1982. {
  1983. struct gfs2_sbd *sdp = seq->private;
  1984. loff_t pos = *(loff_t *)iter_ptr;
  1985. unsigned index = pos >> 3;
  1986. unsigned subindex = pos & 0x07;
  1987. int i;
  1988. if (index == 0 && subindex != 0)
  1989. return 0;
  1990. seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
  1991. (index == 0) ? "cpu": gfs2_stype[subindex]);
  1992. for_each_possible_cpu(i) {
  1993. const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
  1994. if (index == 0)
  1995. seq_printf(seq, " %15u", i);
  1996. else
  1997. seq_printf(seq, " %15llu", (unsigned long long)lkstats->
  1998. lkstats[index - 1].stats[subindex]);
  1999. }
  2000. seq_putc(seq, '\n');
  2001. return 0;
  2002. }
  2003. int __init gfs2_glock_init(void)
  2004. {
  2005. int i, ret;
  2006. ret = rhashtable_init(&gl_hash_table, &ht_parms);
  2007. if (ret < 0)
  2008. return ret;
  2009. glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
  2010. WQ_HIGHPRI | WQ_FREEZABLE, 0);
  2011. if (!glock_workqueue) {
  2012. rhashtable_destroy(&gl_hash_table);
  2013. return -ENOMEM;
  2014. }
  2015. gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
  2016. WQ_MEM_RECLAIM | WQ_FREEZABLE,
  2017. 0);
  2018. if (!gfs2_delete_workqueue) {
  2019. destroy_workqueue(glock_workqueue);
  2020. rhashtable_destroy(&gl_hash_table);
  2021. return -ENOMEM;
  2022. }
  2023. ret = register_shrinker(&glock_shrinker);
  2024. if (ret) {
  2025. destroy_workqueue(gfs2_delete_workqueue);
  2026. destroy_workqueue(glock_workqueue);
  2027. rhashtable_destroy(&gl_hash_table);
  2028. return ret;
  2029. }
  2030. for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
  2031. init_waitqueue_head(glock_wait_table + i);
  2032. return 0;
  2033. }
  2034. void gfs2_glock_exit(void)
  2035. {
  2036. unregister_shrinker(&glock_shrinker);
  2037. rhashtable_destroy(&gl_hash_table);
  2038. destroy_workqueue(glock_workqueue);
  2039. destroy_workqueue(gfs2_delete_workqueue);
  2040. }
  2041. static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
  2042. {
  2043. struct gfs2_glock *gl = gi->gl;
  2044. if (gl) {
  2045. if (n == 0)
  2046. return;
  2047. if (!lockref_put_not_zero(&gl->gl_lockref))
  2048. gfs2_glock_queue_put(gl);
  2049. }
  2050. for (;;) {
  2051. gl = rhashtable_walk_next(&gi->hti);
  2052. if (IS_ERR_OR_NULL(gl)) {
  2053. if (gl == ERR_PTR(-EAGAIN)) {
  2054. n = 1;
  2055. continue;
  2056. }
  2057. gl = NULL;
  2058. break;
  2059. }
  2060. if (gl->gl_name.ln_sbd != gi->sdp)
  2061. continue;
  2062. if (n <= 1) {
  2063. if (!lockref_get_not_dead(&gl->gl_lockref))
  2064. continue;
  2065. break;
  2066. } else {
  2067. if (__lockref_is_dead(&gl->gl_lockref))
  2068. continue;
  2069. n--;
  2070. }
  2071. }
  2072. gi->gl = gl;
  2073. }
  2074. static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
  2075. __acquires(RCU)
  2076. {
  2077. struct gfs2_glock_iter *gi = seq->private;
  2078. loff_t n;
  2079. /*
  2080. * We can either stay where we are, skip to the next hash table
  2081. * entry, or start from the beginning.
  2082. */
  2083. if (*pos < gi->last_pos) {
  2084. rhashtable_walk_exit(&gi->hti);
  2085. rhashtable_walk_enter(&gl_hash_table, &gi->hti);
  2086. n = *pos + 1;
  2087. } else {
  2088. n = *pos - gi->last_pos;
  2089. }
  2090. rhashtable_walk_start(&gi->hti);
  2091. gfs2_glock_iter_next(gi, n);
  2092. gi->last_pos = *pos;
  2093. return gi->gl;
  2094. }
  2095. static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
  2096. loff_t *pos)
  2097. {
  2098. struct gfs2_glock_iter *gi = seq->private;
  2099. (*pos)++;
  2100. gi->last_pos = *pos;
  2101. gfs2_glock_iter_next(gi, 1);
  2102. return gi->gl;
  2103. }
  2104. static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
  2105. __releases(RCU)
  2106. {
  2107. struct gfs2_glock_iter *gi = seq->private;
  2108. rhashtable_walk_stop(&gi->hti);
  2109. }
  2110. static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
  2111. {
  2112. dump_glock(seq, iter_ptr, false);
  2113. return 0;
  2114. }
  2115. static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
  2116. {
  2117. preempt_disable();
  2118. if (*pos >= GFS2_NR_SBSTATS)
  2119. return NULL;
  2120. return pos;
  2121. }
  2122. static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
  2123. loff_t *pos)
  2124. {
  2125. (*pos)++;
  2126. if (*pos >= GFS2_NR_SBSTATS)
  2127. return NULL;
  2128. return pos;
  2129. }
  2130. static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
  2131. {
  2132. preempt_enable();
  2133. }
  2134. static const struct seq_operations gfs2_glock_seq_ops = {
  2135. .start = gfs2_glock_seq_start,
  2136. .next = gfs2_glock_seq_next,
  2137. .stop = gfs2_glock_seq_stop,
  2138. .show = gfs2_glock_seq_show,
  2139. };
  2140. static const struct seq_operations gfs2_glstats_seq_ops = {
  2141. .start = gfs2_glock_seq_start,
  2142. .next = gfs2_glock_seq_next,
  2143. .stop = gfs2_glock_seq_stop,
  2144. .show = gfs2_glstats_seq_show,
  2145. };
  2146. static const struct seq_operations gfs2_sbstats_sops = {
  2147. .start = gfs2_sbstats_seq_start,
  2148. .next = gfs2_sbstats_seq_next,
  2149. .stop = gfs2_sbstats_seq_stop,
  2150. .show = gfs2_sbstats_seq_show,
  2151. };
  2152. #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
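/*
 * Editorial note: __gfs2_glocks_open() below pre-allocates a seq_file
 * buffer of GFS2_SEQ_GOODSIZE so that dumping many glocks does not have to
 * keep overflowing and retrying with the default single-page buffer; if
 * the kmalloc fails, seq_file simply falls back to its usual allocation.
 */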
  2153. static int __gfs2_glocks_open(struct inode *inode, struct file *file,
  2154. const struct seq_operations *ops)
  2155. {
  2156. int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
  2157. if (ret == 0) {
  2158. struct seq_file *seq = file->private_data;
  2159. struct gfs2_glock_iter *gi = seq->private;
  2160. gi->sdp = inode->i_private;
  2161. seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
  2162. if (seq->buf)
  2163. seq->size = GFS2_SEQ_GOODSIZE;
  2164. /*
  2165. * Initially, we are "before" the first hash table entry; the
  2166. * first call to rhashtable_walk_next gets us the first entry.
  2167. */
  2168. gi->last_pos = -1;
  2169. gi->gl = NULL;
  2170. rhashtable_walk_enter(&gl_hash_table, &gi->hti);
  2171. }
  2172. return ret;
  2173. }
  2174. static int gfs2_glocks_open(struct inode *inode, struct file *file)
  2175. {
  2176. return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
  2177. }
  2178. static int gfs2_glocks_release(struct inode *inode, struct file *file)
  2179. {
  2180. struct seq_file *seq = file->private_data;
  2181. struct gfs2_glock_iter *gi = seq->private;
  2182. if (gi->gl)
  2183. gfs2_glock_put(gi->gl);
  2184. rhashtable_walk_exit(&gi->hti);
  2185. return seq_release_private(inode, file);
  2186. }
  2187. static int gfs2_glstats_open(struct inode *inode, struct file *file)
  2188. {
  2189. return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
  2190. }
  2191. static const struct file_operations gfs2_glocks_fops = {
  2192. .owner = THIS_MODULE,
  2193. .open = gfs2_glocks_open,
  2194. .read = seq_read,
  2195. .llseek = seq_lseek,
  2196. .release = gfs2_glocks_release,
  2197. };
  2198. static const struct file_operations gfs2_glstats_fops = {
  2199. .owner = THIS_MODULE,
  2200. .open = gfs2_glstats_open,
  2201. .read = seq_read,
  2202. .llseek = seq_lseek,
  2203. .release = gfs2_glocks_release,
  2204. };
  2205. DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);
  2206. void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
  2207. {
  2208. sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
  2209. debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
  2210. &gfs2_glocks_fops);
  2211. debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
  2212. &gfs2_glstats_fops);
  2213. debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
  2214. &gfs2_sbstats_fops);
  2215. }
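/*
 * Editorial note: with debugfs mounted in its usual location, the files
 * created above appear as
 * /sys/kernel/debug/gfs2/<sd_table_name>/{glocks,glstats,sbstats}.
 */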
  2216. void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
  2217. {
  2218. debugfs_remove_recursive(sdp->debugfs_dir);
  2219. sdp->debugfs_dir = NULL;
  2220. }
  2221. void gfs2_register_debugfs(void)
  2222. {
  2223. gfs2_root = debugfs_create_dir("gfs2", NULL);
  2224. }
  2225. void gfs2_unregister_debugfs(void)
  2226. {
  2227. debugfs_remove(gfs2_root);
  2228. gfs2_root = NULL;
  2229. }