notification.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened. When inotify gets an event it will need to add that
 * event to the group notify queue. Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue. This event_holder
 * has a pointer back to the original event. Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event. This means we have a single allocation instead
 * of always needing two. If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
        return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
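
/*
 * Illustrative sketch, not part of the original file: what the cookie is
 * for. The two halves of a rename are reported as separate events that
 * share one cookie so a listener can pair them up. fsnotify_move() in
 * <linux/fsnotify.h> does this for real; example_emit() and
 * example_report_move() below are hypothetical stand-ins for the
 * backend-specific reporting path.
 */
static void example_emit(__u32 mask, u32 cookie)
{
        /* Stand-in for real event reporting, e.g. via fsnotify_name(). */
}

static void example_report_move(void)
{
        u32 cookie = fsnotify_get_cookie();

        /*
         * Both events carry the same cookie so userspace (e.g. inotify's
         * IN_MOVED_FROM/IN_MOVED_TO) can match the pair across directories.
         */
        example_emit(FS_MOVED_FROM, cookie);
        example_emit(FS_MOVED_TO, cookie);
}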

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
        assert_spin_locked(&group->notification_lock);
        return list_empty(&group->notification_list) ? true : false;
}

void fsnotify_destroy_event(struct fsnotify_group *group,
                            struct fsnotify_event *event)
{
        /* Overflow events are per-group and we don't want to free them */
        if (!event || event == group->overflow_event)
                return;
        /*
         * If the event is still queued, we have a problem... Do an unreliable
         * lockless check first to avoid locking in the common case. The
         * locking may be necessary for permission events which got removed
         * from the list by a different CPU than the one freeing the event.
         */
        if (!list_empty(&event->list)) {
                spin_lock(&group->notification_lock);
                WARN_ON(!list_empty(&event->list));
                spin_unlock(&group->notification_lock);
        }
        group->ops->free_event(event);
}

/*
 * Add an event to the group notification queue. The group can later pull this
 * event off the queue to deal with. The function returns 0 if the event was
 * added to the queue, 1 if the event was merged with some other queued event,
 * 2 if the event was not queued - either the queue of events has overflowed
 * or the group is shutting down.
 */
int fsnotify_add_event(struct fsnotify_group *group,
                       struct fsnotify_event *event,
                       int (*merge)(struct list_head *,
                                    struct fsnotify_event *))
{
        int ret = 0;
        struct list_head *list = &group->notification_list;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        spin_lock(&group->notification_lock);

        if (group->shutdown) {
                spin_unlock(&group->notification_lock);
                return 2;
        }

        if (event == group->overflow_event ||
            group->q_len >= group->max_events) {
                ret = 2;
                /* Queue overflow event only if it isn't already queued */
                if (!list_empty(&group->overflow_event->list)) {
                        spin_unlock(&group->notification_lock);
                        return ret;
                }
                event = group->overflow_event;
                goto queue;
        }

        if (!list_empty(list) && merge) {
                ret = merge(list, event);
                if (ret) {
                        spin_unlock(&group->notification_lock);
                        return ret;
                }
        }

queue:
        group->q_len++;
        list_add_tail(&event->list, list);
        spin_unlock(&group->notification_lock);

        wake_up(&group->notification_waitq);
        kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
        return ret;
}
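
/*
 * Illustrative sketch, not part of the original file: how a backend
 * typically drives fsnotify_add_event(). example_merge() and example_queue()
 * are hypothetical; compare inotify, which passes its own inotify_merge()
 * callback. The important point is that a non-zero return (1 = merged,
 * 2 = dropped because of overflow or shutdown) means the caller's event was
 * not queued and must be freed by the caller.
 */
static int example_merge(struct list_head *list, struct fsnotify_event *event)
{
        /*
         * A real merge callback compares @event with what is already on
         * @list and returns non-zero if it coalesced the two.
         */
        return 0;
}

static int example_queue(struct fsnotify_group *group,
                         struct fsnotify_event *event)
{
        int ret = fsnotify_add_event(group, event, example_merge);

        if (ret)
                fsnotify_destroy_event(group, event);
        return ret;
}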

void fsnotify_remove_queued_event(struct fsnotify_group *group,
                                  struct fsnotify_event *event)
{
        assert_spin_locked(&group->notification_lock);
        /*
         * We need to init the list head for the case of the overflow event
         * so that the list_empty() check in fsnotify_add_event() works.
         */
        list_del_init(&event->list);
        group->q_len--;
}

/*
 * Remove and return the first event from the notification list. It is the
 * responsibility of the caller to destroy the obtained event
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
        struct fsnotify_event *event;

        assert_spin_locked(&group->notification_lock);

        pr_debug("%s: group=%p\n", __func__, group);

        event = list_first_entry(&group->notification_list,
                                 struct fsnotify_event, list);
        fsnotify_remove_queued_event(group, event);
        return event;
}

/*
 * This will not remove the event; that must be done with
 * fsnotify_remove_first_event()
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
        assert_spin_locked(&group->notification_lock);

        return list_first_entry(&group->notification_list,
                                struct fsnotify_event, list);
}
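
/*
 * Illustrative sketch, not part of the original file: the locking pattern a
 * backend's read path follows when draining the queue. example_get_one() is
 * hypothetical; compare the get_one_event() helpers in inotify and fanotify.
 */
static struct fsnotify_event *example_get_one(struct fsnotify_group *group)
{
        struct fsnotify_event *event = NULL;

        spin_lock(&group->notification_lock);
        if (!fsnotify_notify_queue_is_empty(group))
                event = fsnotify_remove_first_event(group);
        spin_unlock(&group->notification_lock);

        /*
         * The caller copies the event to userspace and then releases it
         * with fsnotify_destroy_event().
         */
        return event;
}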

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
        struct fsnotify_event *event;

        spin_lock(&group->notification_lock);
        while (!fsnotify_notify_queue_is_empty(group)) {
                event = fsnotify_remove_first_event(group);
                /*
                 * Drop the lock before destroying the event;
                 * fsnotify_destroy_event() may take notification_lock itself.
                 */
                spin_unlock(&group->notification_lock);
                fsnotify_destroy_event(group, event);
                spin_lock(&group->notification_lock);
        }
        spin_unlock(&group->notification_lock);
}