flock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
#define AFS_LOCK_YOUR_TRY	2
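
/* A file_lock's fl_u.afs.state holds one of the values above whilst the lock
 * is being set up; the waiter is woken with AFS_LOCK_GRANTED on success, with
 * AFS_LOCK_YOUR_TRY if it should make the next attempt on the server itself,
 * or with a negative errno on failure.
 */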

struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

static atomic_t afs_file_lock_debug_id;

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;
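
	/* AFS_LOCKWAIT is the server's lock lifetime in seconds; aim to renew
	 * at roughly the halfway point.
	 */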
	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);

	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}

/*
 * In the case of successful completion of a lock operation, record the time
 * the reply appeared and start the lock extension timer.
 */
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_operation *op = call->op;
	struct afs_vnode *vnode = op->file[0].vnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->reply_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
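
	/* If the server lock we hold is exclusive, every local waiter can be
	 * granted and left to the VFS to arbitrate; under a shared server
	 * lock, only readers may proceed.
	 */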
	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && p->fl_type == F_WRLCK)
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		wake_up(&p->fl_wait);
	}
}

/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get.  If there are any
 * remaining lockers, try to wake up one of them to have a go.
 */
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int fl_type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		fl_type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->fl_type == fl_type &&
		    afs_file_key(p->fl_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			wake_up(&p->fl_wait);
		}

		/* Select the next locker to hand off to. */
		if (next &&
		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
			continue;
		next = p;
	}

	vnode->lock_key = NULL;
	key_put(key);
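
	/* Wake the chosen waiter and let it make the next attempt on the
	 * server itself (AFS_LOCK_YOUR_TRY) rather than issuing an RPC on its
	 * behalf.
	 */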
	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		wake_up(&next->fl_wait);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}

/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		wake_up(&p->fl_wait);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

static void afs_lock_success(struct afs_operation *op)
{
	_enter("op=%08x", op->debug_id);

	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_set_lock_operation = {
	.issue_afs_rpc	= afs_fs_set_lock,
	.issue_yfs_rpc	= yfs_fs_set_lock,
	.success	= afs_lock_success,
	.aborted	= afs_check_for_remote_deletion,
};

/*
 * Get a lock on a file
 */
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->lock.type = type;
	op->ops = &afs_set_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_extend_lock_operation = {
	.issue_afs_rpc	= afs_fs_extend_lock,
	.issue_yfs_rpc	= yfs_fs_extend_lock,
	.success	= afs_lock_success,
};

/*
 * Extend a lock on a file
 */
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);
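
	/* Don't let a signal abort this RPC: the lock is already held on the
	 * server and needs to be refreshed before it expires.
	 */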
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_extend_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_release_lock_operation = {
	.issue_afs_rpc	= afs_fs_release_lock,
	.issue_yfs_rpc	= yfs_fs_release_lock,
	.success	= afs_lock_success,
};

/*
 * Release a lock on a file
 */
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_release_lock_operation;
	return afs_do_sync_operation(op);
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
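
	/* Each pass of the state machine below handles one state (releasing,
	 * extending or retrying the server lock); we come back to "again" if
	 * the state changed whilst the spinlock was dropped for an RPC.
	 */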
again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll expire anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			pr_warn("AFS: Failed to release lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
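
		/* If the extension failed, retry in ten seconds; the server
		 * lock doesn't expire until AFS_LOCKWAIT has elapsed.
		 */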
		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals.  The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = locks_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->fl_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local.  Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock.  Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}
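
	/* The server's file status carries lock_count: positive counts read
	 * locks, -1 means write-locked and 0 means unlocked.
	 */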
	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry.  The server will break the outstanding
		 * callbacks on a file when a lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->fl_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
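
	/* Poll the server every few seconds in case the holder's lock expires
	 * without a callback break being delivered.
	 */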
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait.  Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
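
	/* A negative state is a rejection errno; otherwise we were woken
	 * either to take over the lock attempt ourselves (AFS_LOCK_YOUR_TRY)
	 * or by a signal whilst still AFS_LOCK_PENDING.
	 */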
	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock.  We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got.  We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->fl_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->fl_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over AFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}