// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include <linux/security.h>
#include "xattr.h"
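
/*
 * Both the inline and the shared xattr areas are parsed with the same
 * cursor below: it walks metadata blocks one page at a time, and the
 * callbacks in struct xattr_iter_handlers specialize the walk for
 * lookup (getxattr) vs. enumeration (listxattr).
 */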
struct xattr_iter {
	struct super_block *sb;
	struct page *page;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned int ofs;
};
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
	/* the only user of kunmap() is 'init_inode_xattrs' */
	if (!atomic)
		kunmap(it->page);
	else
		kunmap_atomic(it->kaddr);

	unlock_page(it->page);
	put_page(it->page);
}

static inline void xattr_iter_end_final(struct xattr_iter *it)
{
	if (!it->page)
		return;

	xattr_iter_end(it, true);
}
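
/*
 * Read the xattr ibody header that immediately follows the core inode
 * on disk and cache the array of shared xattr ids in the in-memory
 * inode.  Concurrent initializers are serialized on EROFS_I_BL_XATTR_BIT,
 * and EROFS_I_EA_INITED_BIT marks completion so later calls return fast.
 */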
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* the common case is that xattrs of this inode are already initialized */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* has someone already initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(inode->i_sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(inode->i_sb,
				  "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	sb = inode->i_sb;
	sbi = EROFS_SB(sb);
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_meta_page(sb, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= EROFS_BLKSIZ) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}

/*
 * the general idea of these return values is:
 * if    0 is returned, go on processing the current xattr;
 * if  > 0 is returned, skip this round to process the next xattr;
 * if  < 0 is returned, an error (maybe -ENOATTR) occurred
 *         and needs to be handled.
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};
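
/*
 * Move the cursor to the next metadata block once ofs runs past
 * EROFS_BLKSIZ: release the current page, map the page holding the new
 * block and rebase ofs into it.
 */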
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);

	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
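
/*
 * Position the cursor at the first inline xattr entry, i.e. right after
 * the ibody header and the shared xattr id array, and return how many
 * bytes of inline entries remain (or -ENOATTR if there are none).
 */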
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}

/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read xattr entry to the memory;
	 *    since entry headers are EROFS_XATTR_ALIGN'ed, an entry never
	 *    crosses a block boundary and must sit inside this page.
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;
	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}
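
/*
 * getxattr walk state: `find_xattr_handlers' below matches each entry
 * against (index, name) and copies the value into `buffer' on a hit.
 */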
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};

static int xattr_entrymatch(struct xattr_iter *_it,
			    struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
			   unsigned int processed, char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
			     unsigned int value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return !it->buffer ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
			    unsigned int processed,
			    char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
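
/*
 * Look the name up in the inline xattr area first; on success the value
 * size ends up in it->buffer_size, which is what gets returned.
 */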
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret != -ENOATTR)
			break;
	}
	xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}
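
/*
 * Then try the shared xattrs referenced by this inode; the meta page is
 * only remapped when consecutive shared ids live in different blocks.
 */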
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}
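
/*
 * ->list callbacks decide whether a class of xattrs is visible in
 * listxattr output: user.* honours the XATTR_USER mount option while
 * trusted.* requires CAP_SYS_ADMIN.
 */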
static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}
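
/*
 * Core lookup entry point: make sure the xattr metadata is cached, then
 * search inline entries before falling back to the shared xattr area.
 */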
int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (!name)
		return -EINVAL;

	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;

	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	return ret;
}
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(&sbi->ctx, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags = EROFS_XATTR_INDEX_USER,
	.list = erofs_xattr_user_list,
	.get = erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.flags = EROFS_XATTR_INDEX_TRUSTED,
	.list = erofs_xattr_trusted_list,
	.get = erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.flags = EROFS_XATTR_INDEX_SECURITY,
	.get = erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
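
/*
 * listxattr walk state: `list_xattr_handlers' below emits
 * "prefix" + name + '\0' for every visible entry into `buffer', or just
 * accounts the required size when `buffer' is NULL.
 */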
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};

static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (!h || (h->list && !h->list(it->dentry)))
		return 1;

	prefix = xattr_prefix(h);
	prefix_len = strlen(prefix);

	if (!it->buffer) {
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
			  unsigned int processed, char *buf, unsigned int len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
			   unsigned int value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret)
			break;
	}
	xattr_iter_end_final(&it->it);
	return ret ? ret : it->buffer_ofs;
}

static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_ofs;
}
ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret < 0 && ret != -ENOATTR)
		return ret;
	return shared_listxattr(&it);
}
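
/*
 * POSIX ACLs are stored as regular xattrs with an empty name under
 * dedicated name indexes; query the value size first, then fetch the
 * raw value and decode it with posix_acl_from_xattr().
 */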
#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);

	return acl;
}
#endif