locks.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/pagelist.h>

static u64 lock_secret;
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req);

static inline u64 secure_addr(void *addr)
{
        u64 v = lock_secret ^ (u64)(unsigned long)addr;
        /*
         * Set the most significant bit, so that MDS knows the 'owner'
         * is sufficient to identify the owner of lock. (old code uses
         * both 'owner' and 'pid')
         */
        v |= (1ULL << 63);
        return v;
}
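
/*
 * Seed lock_secret once at module init.  secure_addr() mixes it with the
 * lock owner pointer so the 'owner' value sent to the MDS does not expose
 * a raw kernel address.
 */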
void __init ceph_flock_init(void)
{
        get_random_bytes(&lock_secret, sizeof(lock_secret));
}
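
/*
 * fl_copy_lock callback: the VFS is duplicating a lock, so take an extra
 * reference on the per-inode and per-file lock counters for the copy.
 */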
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct ceph_file_info *fi = dst->fl_file->private_data;
        struct inode *inode = file_inode(dst->fl_file);

        atomic_inc(&ceph_inode(inode)->i_filelock_ref);
        atomic_inc(&fi->num_locks);
}
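
/*
 * fl_release_private callback: drop the counters taken above and clear the
 * file-lock error flag once the last lock on the inode goes away.
 */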
static void ceph_fl_release_lock(struct file_lock *fl)
{
        struct ceph_file_info *fi = fl->fl_file->private_data;
        struct inode *inode = file_inode(fl->fl_file);
        struct ceph_inode_info *ci = ceph_inode(inode);

        atomic_dec(&fi->num_locks);
        if (atomic_dec_and_test(&ci->i_filelock_ref)) {
                /* clear error when all locks are released */
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
                spin_unlock(&ci->i_ceph_lock);
        }
}
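
/* file_lock callbacks used to keep the lock counters above in sync with the VFS */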
static const struct file_lock_operations ceph_fl_lock_ops = {
        .fl_copy_lock = ceph_fl_copy_lock,
        .fl_release_private = ceph_fl_release_lock,
};

/*
 * Implement fcntl and flock locking functions.
 */
static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
        u64 owner;

        if (operation == CEPH_MDS_OP_SETFILELOCK) {
                /*
                 * increasing i_filelock_ref closes race window between
                 * handling request reply and adding file_lock struct to
                 * inode. Otherwise, auth caps may get trimmed in the
                 * window. Caller function will decrease the counter.
                 */
                fl->fl_ops = &ceph_fl_lock_ops;
                fl->fl_ops->fl_copy_lock(fl, NULL);
        }

        if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
                wait = 0;

        req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
                length = 0;
        else
                length = fl->fl_end - fl->fl_start + 1;

        owner = secure_addr(fl->fl_owner);

        dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
             "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
             (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
             wait, fl->fl_type);

        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
        req->r_args.filelock_change.owner = cpu_to_le64(owner);
        req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
        req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
        req->r_args.filelock_change.length = cpu_to_le64(length);
        req->r_args.filelock_change.wait = wait;

        if (wait)
                req->r_wait_for_completion = ceph_lock_wait_for_completion;

        err = ceph_mdsc_do_request(mdsc, inode, req);
        if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_RDLCK;
                else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_WRLCK;
                else
                        fl->fl_type = F_UNLCK;

                fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
                length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
                         le64_to_cpu(req->r_reply_info.filelock_reply->length);
                if (length >= 1)
                        fl->fl_end = length - 1;
                else
                        fl->fl_end = 0;
        }
        ceph_mdsc_put_request(req);
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
             "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type, err);
        return err;
}
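
/*
 * Wait for a blocking SETFILELOCK request.  If the wait is interrupted,
 * abort the request and, when it has already been sent to an MDS, ask the
 * MDS to drop the pending lock via a matching *_INTR unlock request.
 */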
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req)
{
        struct ceph_mds_request *intr_req;
        struct inode *inode = req->r_inode;
        int err, lock_type;

        BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
        if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
                lock_type = CEPH_LOCK_FCNTL_INTR;
        else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
                lock_type = CEPH_LOCK_FLOCK_INTR;
        else
                BUG_ON(1);
        BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);

        err = wait_for_completion_interruptible(&req->r_completion);
        if (!err)
                return 0;

        dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
             req->r_tid);

        mutex_lock(&mdsc->mutex);
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = 0;
        } else {
                /*
                 * ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);

                if (!req->r_session) {
                        /* haven't sent the request */
                        err = 0;
                }
        }
        mutex_unlock(&mdsc->mutex);
        if (!err)
                return 0;
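
        /*
         * The request was sent and then interrupted: tell the MDS to drop
         * the now-unwanted lock attempt with a *_INTR unlock request.
         */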
        intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
                                            USE_AUTH_MDS);
        if (IS_ERR(intr_req))
                return PTR_ERR(intr_req);

        intr_req->r_inode = inode;
        ihold(inode);
        intr_req->r_num_caps = 1;
        intr_req->r_args.filelock_change = req->r_args.filelock_change;
        intr_req->r_args.filelock_change.rule = lock_type;
        intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;

        err = ceph_mdsc_do_request(mdsc, inode, intr_req);
        ceph_mdsc_put_request(intr_req);

        if (err && err != -ERESTARTSYS)
                return err;

        wait_for_completion_killable(&req->r_safe_completion);
        return 0;
}
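
/*
 * Try the unlock locally first.  Returns 1 if a matching local lock was
 * removed (so the MDS must be told to unlock as well), 0 if there was
 * nothing to unlock, or a negative error.
 */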
static int try_unlock_file(struct file *file, struct file_lock *fl)
{
        int err;
        unsigned int orig_flags = fl->fl_flags;

        fl->fl_flags |= FL_EXISTS;
        err = locks_lock_file_wait(file, fl);
        fl->fl_flags = orig_flags;
        if (err == -ENOENT) {
                if (!(orig_flags & FL_EXISTS))
                        err = 0;
                return err;
        }
        return 1;
}

/*
 * Attempt to set an fcntl lock.
 * For now, this just goes away to the server. Later it may be more awesome.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        /* No mandatory locks */
        if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);

        /* set wait bit as appropriate, then make command as Ceph expects it */
        if (IS_GETLK(cmd))
                op = CEPH_MDS_OP_GETFILELOCK;
        else if (IS_SETLKW(cmd))
                wait = 1;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
                err = -EIO;
        }
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
                        posix_lock_file(file, fl, NULL);
                return err;
        }

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
                        dout("mds locked, locking locally\n");
                        err = posix_lock_file(file, fl, NULL);
                        if (err) {
                                /* undo! This should only happen if
                                 * the kernel detects local
                                 * deadlock. */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
                                dout("got %d on posix_lock_file, undid lock\n",
                                     err);
                        }
                }
        }
        return err;
}
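
/*
 * Attempt to set an flock.  As with fcntl locks, the request goes to the
 * MDS and, on success, is mirrored locally.
 */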
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        /* No mandatory locks */
        if (fl->fl_type & LOCK_MAND)
                return -EOPNOTSUPP;

        dout("ceph_flock, fl_file: %p\n", fl->fl_file);

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
                err = -EIO;
        }
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (F_UNLCK == fl->fl_type)
                        locks_lock_file_wait(file, fl);
                return err;
        }

        if (IS_SETLKW(cmd))
                wait = 1;

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
                                inode, lock_cmd, wait, fl);
        if (!err && F_UNLCK != fl->fl_type) {
                err = locks_lock_file_wait(file, fl);
                if (err) {
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          inode, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on locks_lock_file_wait, undid lock\n", err);
                }
        }
        return err;
}

/*
 * Fills in the passed counter variables, so you can prepare pagelist metadata
 * before calling ceph_encode_locks.
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
        struct file_lock *lock;
        struct file_lock_context *ctx;

        *fcntl_count = 0;
        *flock_count = 0;

        ctx = inode->i_flctx;
        if (ctx) {
                spin_lock(&ctx->flc_lock);
                list_for_each_entry(lock, &ctx->flc_posix, fl_list)
                        ++(*fcntl_count);
                list_for_each_entry(lock, &ctx->flc_flock, fl_list)
                        ++(*flock_count);
                spin_unlock(&ctx->flc_lock);
        }
        dout("counted %d flock locks and %d fcntl locks\n",
             *flock_count, *fcntl_count);
}

/*
 * Given a pointer to a lock, convert it to a ceph filelock
 */
static int lock_to_ceph_filelock(struct file_lock *lock,
                                 struct ceph_filelock *cephlock)
{
        int err = 0;

        cephlock->start = cpu_to_le64(lock->fl_start);
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
        cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));

        switch (lock->fl_type) {
        case F_RDLCK:
                cephlock->type = CEPH_LOCK_SHARED;
                break;
        case F_WRLCK:
                cephlock->type = CEPH_LOCK_EXCL;
                break;
        case F_UNLCK:
                cephlock->type = CEPH_LOCK_UNLOCK;
                break;
        default:
                dout("Have unknown lock type %d\n", lock->fl_type);
                err = -EINVAL;
        }
        return err;
}

/*
 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. Must be called with inode->i_lock already held.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
 */
int ceph_encode_locks_to_buffer(struct inode *inode,
                                struct ceph_filelock *flocks,
                                int num_fcntl_locks, int num_flock_locks)
{
        struct file_lock *lock;
        struct file_lock_context *ctx = inode->i_flctx;
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
        int l = 0;

        dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
             num_fcntl_locks);

        if (!ctx)
                return 0;

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
                ++seen_flock;
                if (seen_flock > num_flock_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
fail:
        spin_unlock(&ctx->flc_lock);
        return err;
}

/*
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
                           struct ceph_pagelist *pagelist,
                           int num_fcntl_locks, int num_flock_locks)
{
        int err = 0;
        __le32 nlocks;

        nlocks = cpu_to_le32(num_fcntl_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_fcntl_locks > 0) {
                err = ceph_pagelist_append(pagelist, flocks,
                                           num_fcntl_locks * sizeof(*flocks));
                if (err)
                        goto out_fail;
        }

        nlocks = cpu_to_le32(num_flock_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_flock_locks > 0) {
                err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
                                           num_flock_locks * sizeof(*flocks));
        }
out_fail:
        return err;
}