// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging, the mark is stable as long as there is chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive by
 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
 * the current chunk.
 *
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.  RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
 * one chunk reference. This reference is dropped either when a mark is going
 * to be freed (corresponding inode goes away) or when chunk attached to the
 * mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
 * grace period as it protects RCU readers of the hash table.
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
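/*
 * Allocate a new audit_tree for the given pathname, with a single reference
 * held by the caller. The pathname is copied into the flexible array member
 * at the end of the structure.
 */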
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
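/*
 * Tree refcount helpers; the final put frees the tree only after an RCU
 * grace period (kfree_rcu), so RCU readers that still hold a pointer to it
 * are not left with a dangling reference.
 */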
static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);

	audit_put_chunk(chunk);
}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}
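/*
 * Allocate a chunk with room for @count owner slots. Each slot's list head
 * is initialized and its index is prefilled with the slot number; the MSB
 * used as the "will prune" flag is left clear here.
 */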
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	int i;

	chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}
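/*
 * Chunk hash table: chunks are keyed by the address of the inode's
 * i_fsnotify_marks field (see inode_to_key()) and hashed into a fixed array
 * of HASH_SIZE list heads. Lookups walk the lists under RCU; insertions and
 * removals are serialized by hash_lock.
 */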
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;

	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */
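/*
 * Recover the containing chunk from one of its owner slots. node.index
 * (with the "will prune" MSB masked off) is the slot number, so stepping
 * back that many entries lands on owners[0], from which container_of()
 * yields the chunk itself.
 */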
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);

	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}
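/*
 * Copy everything from @old into @new: the hash key, the tree->root back
 * pointers, and the per-owner slots (compacting away slots whose owner has
 * already been cleared), then swap the chunks in the hash under RCU. Called
 * with hash_lock held.
 */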
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		if (!owner) /* result of earlier fallback */
			continue;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}

static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}
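/*
 * Shrink or remove @chunk after one of its owners has been dropped by the
 * caller. If no owners remain, the chunk is unhashed and its mark detached
 * and freed; otherwise the chunk is replaced by a smaller copy holding only
 * the surviving owners. Re-checks under mark_mutex that @mark is still
 * attached and still points to @chunk, since hash_lock was dropped.
 */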
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	mutex_lock(&audit_tree_group->mark_mutex);
	/*
	 * mark_mutex stabilizes chunk attached to the mark so we can check
	 * whether it didn't change while we've dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	mutex_unlock(&audit_tree_group->mark_mutex);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	/*
	 * Drop our initial reference. When mark we point to is getting freed,
	 * we get notification through ->freeing_mark callback and cleanup
	 * chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}

/* the first tagged inode becomes root of tree */
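/*
 * Attach @tree to the chunk covering @inode. If the inode has no audit mark
 * yet this degrades to create_chunk(); if the tree already owns the chunk it
 * is a no-op; otherwise the existing chunk is replaced by a copy that is one
 * owner slot larger. The new slot is created with the "will prune" MSB set,
 * so a later trim_marked() can revert the tagging if the caller fails.
 */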
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * Found mark is guaranteed to be attached and mark_mutex protects mark
	 * from getting detached and thus it makes sure there is chunk attached
	 * to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
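/*
 * Detach and free every rule attached to @tree, emitting a CONFIG_CHANGE
 * record for each fully initialized one. Runs under audit_filter_mutex.
 */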
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);

		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;

	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
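/*
 * Walk every tree on tree_list and drop the chunks that no longer correspond
 * to a mount under the tree's pathname. A dummy cursor entry keeps the walk
 * position stable across the points where audit_filter_mutex is dropped.
 */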
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
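/*
 * Build the audit_tree for a new AUDIT_TREE ("dir=") rule. Only absolute
 * paths on the exit filter list with an equality comparison are accepted,
 * and the rule may not already carry an inode, watch or tree filter.
 */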
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
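/*
 * For every tree whose watched directory contains @old, additionally tag the
 * mounts collected at @new so those trees cover the new location as well.
 * Returns the first tagging error, or 0 on success; on failure the partially
 * applied tags are trimmed back via trim_marked().
 */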
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
	struct list_head *list = &context->killed_trees;

	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(context, victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */
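/*
 * The inode behind @chunk is being evicted: every tree rooted at this chunk
 * is marked a goner and queued for destruction. If the current syscall has a
 * killed_trees list the victims are reaped there at syscall exit; otherwise
 * they go to prune_list and the prune thread is woken. Finally the chunk is
 * unhashed and detached from all of its owners.
 */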
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(audit_context(), owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}
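/*
 * The marks exist to pin chunks to inodes and to learn when an inode goes
 * away; no filesystem events are consumed here, so the event handler is a
 * no-op and the interesting callback is ->freeing_mark below.
 */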
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
				    struct fsnotify_group *group)
{
	struct audit_chunk *chunk;

	mutex_lock(&mark->group->mark_mutex);
	spin_lock(&hash_lock);
	chunk = mark_chunk(mark);
	replace_mark_chunk(mark, NULL);
	spin_unlock(&hash_lock);
	mutex_unlock(&mark->group->mark_mutex);
	if (chunk) {
		evict_chunk(chunk);
		audit_mark_put_chunk(chunk);
	}

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_inode_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};
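/*
 * Boot-time setup: create the slab cache for audit_tree_mark, allocate the
 * fsnotify group the marks belong to, and initialize the chunk hash buckets.
 */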
static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);