// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "mojo/core/watcher_dispatcher.h"

#include <algorithm>
#include <limits>

#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/memory/ptr_util.h"
#include "mojo/core/watch.h"

namespace mojo {
namespace core {

WatcherDispatcher::WatcherDispatcher(MojoTrapEventHandler handler)
    : handler_(handler) {}
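
// Illustrative only: a rough sketch (not part of this file's logic) of how a
// trap client presumably ends up constructing one of these dispatchers,
// assuming the public MojoCreateTrap() entry point routes its handler here.
// The handler function below is hypothetical.
//
//   void OnTrapEvent(const MojoTrapEvent* event) {
//     // |event->trigger_context| identifies which trigger fired.
//   }
//
//   MojoHandle trap;
//   MojoCreateTrap(&OnTrapEvent, nullptr, &trap);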

void WatcherDispatcher::NotifyHandleState(Dispatcher* dispatcher,
                                          const HandleSignalsState& state) {
  base::AutoLock lock(lock_);
  auto it = watched_handles_.find(dispatcher);
  if (it == watched_handles_.end())
    return;

  // Maybe fire a notification to the watch associated with this dispatcher,
  // provided we're armed and it cares about the new state.
  if (it->second->NotifyState(state, armed_)) {
    ready_watches_.insert(it->second.get());

    // If we were armed and got here, we notified the watch. Disarm.
    armed_ = false;
  } else {
    ready_watches_.erase(it->second.get());
  }
}
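
// Note on the arming model above: |armed_| is a one-shot flag. A single
// dispatched notification clears it, so no further events fire until the
// owner re-arms via Arm(); watches that become ready while disarmed simply
// accumulate in |ready_watches_|.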

void WatcherDispatcher::NotifyHandleClosed(Dispatcher* dispatcher) {
  scoped_refptr<Watch> watch;
  {
    base::AutoLock lock(lock_);
    auto it = watched_handles_.find(dispatcher);
    if (it == watched_handles_.end())
      return;

    watch = std::move(it->second);

    // Wipe out all state associated with the closed dispatcher.
    watches_.erase(watch->context());
    ready_watches_.erase(watch.get());
    watched_handles_.erase(it);
  }

  // NOTE: It's important that this is called outside of |lock_| since it
  // acquires internal Watch locks.
  watch->Cancel();
}

// handler_ may be address-taken in a different DSO, and hence incompatible
// with CFI-icall.
NO_SANITIZE("cfi-icall")
void WatcherDispatcher::InvokeWatchCallback(uintptr_t context,
                                            MojoResult result,
                                            const HandleSignalsState& state,
                                            MojoTrapEventFlags flags) {
  MojoTrapEvent event;
  event.struct_size = sizeof(event);
  event.trigger_context = context;
  event.result = result;
  event.signals_state = static_cast<MojoHandleSignalsState>(state);
  event.flags = flags;

  {
    // We avoid holding the lock during dispatch. It's OK for notification
    // callbacks to close this watcher, and it's OK for notifications to race
    // with closure, if for example the watcher is closed from another thread
    // between this test and the invocation of |handler_| below.
    //
    // Because cancellation synchronously blocks all future notifications, and
    // because notifications themselves are mutually exclusive for any given
    // context, we still guarantee that a single MOJO_RESULT_CANCELLED result
    // is the last notification received for any given context.
    //
    // This guarantee is sufficient to make safe, synchronized, per-context
    // state management possible in user code.
    base::AutoLock lock(lock_);
    if (closed_ && result != MOJO_RESULT_CANCELLED)
      return;
  }

  handler_(&event);
}
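
// Illustrative only: the cancellation guarantee documented above is what lets
// handler code free per-trigger state safely. Roughly, with a hypothetical
// per-trigger WatchState type:
//
//   void OnTrapEvent(const MojoTrapEvent* event) {
//     auto* state = reinterpret_cast<WatchState*>(event->trigger_context);
//     if (event->result == MOJO_RESULT_CANCELLED) {
//       delete state;  // No further events can arrive for this context.
//       return;
//     }
//     ...
//   }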

Dispatcher::Type WatcherDispatcher::GetType() const {
  return Type::WATCHER;
}

MojoResult WatcherDispatcher::Close() {
  // We swap out all the watched handle information onto the stack so we can
  // call into their dispatchers without our own lock held.
  base::flat_map<uintptr_t, scoped_refptr<Watch>> watches;
  {
    base::AutoLock lock(lock_);
    if (closed_)
      return MOJO_RESULT_INVALID_ARGUMENT;
    closed_ = true;
    std::swap(watches, watches_);
    watched_handles_.clear();
  }

  // Remove all refs from our watched dispatchers and fire cancellations.
  for (auto& entry : watches) {
    entry.second->dispatcher()->RemoveWatcherRef(this, entry.first);
    entry.second->Cancel();
  }

  return MOJO_RESULT_OK;
}
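
// As the comment in InvokeWatchCallback() explains, each watch cancelled here
// still delivers one final MOJO_RESULT_CANCELLED event to |handler_|, since
// MOJO_RESULT_CANCELLED is exempted from the |closed_| check.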

MojoResult WatcherDispatcher::WatchDispatcher(
    scoped_refptr<Dispatcher> dispatcher,
    MojoHandleSignals signals,
    MojoTriggerCondition condition,
    uintptr_t context) {
  // NOTE: Because it's critical to avoid acquiring any other dispatcher locks
  // while |lock_| is held, we defer adding ourselves to the dispatcher until
  // after we've updated all our own relevant state and released |lock_|.
  {
    base::AutoLock lock(lock_);
    if (closed_)
      return MOJO_RESULT_INVALID_ARGUMENT;

    if (watches_.count(context) || watched_handles_.count(dispatcher.get()))
      return MOJO_RESULT_ALREADY_EXISTS;

    scoped_refptr<Watch> watch =
        new Watch(this, dispatcher, context, signals, condition);

    watches_.insert({context, watch});
    auto result =
        watched_handles_.insert(std::make_pair(dispatcher.get(), watch));
    DCHECK(result.second);
  }

  MojoResult rv = dispatcher->AddWatcherRef(this, context);
  if (rv != MOJO_RESULT_OK) {
    // Oops. This was not a valid handle to watch. Undo the above work and
    // fail gracefully.
    base::AutoLock lock(lock_);
    watches_.erase(context);
    watched_handles_.erase(dispatcher.get());
    return rv;
  }

  bool remove_now;
  {
    // If we've been closed already, there's a chance our closure raced with
    // the call to AddWatcherRef() above. In that case we want to ensure we've
    // removed our ref from |dispatcher|. Note that this may in turn race
    // with normal removal, but that's fine.
    base::AutoLock lock(lock_);
    remove_now = closed_;
  }
  if (remove_now)
    dispatcher->RemoveWatcherRef(this, context);

  return MOJO_RESULT_OK;
}
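
// Illustrative only: WatchDispatcher() backs trigger registration on a trap.
// Assuming the public MojoAddTrigger() entry point forwards here, a caller
// looks roughly like this (handle and context names hypothetical):
//
//   MojoAddTrigger(trap, consumer_handle, MOJO_HANDLE_SIGNAL_READABLE,
//                  MOJO_TRIGGER_CONDITION_SIGNALS_SATISFIED,
//                  reinterpret_cast<uintptr_t>(my_context), nullptr);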

MojoResult WatcherDispatcher::CancelWatch(uintptr_t context) {
  // We may remove the last stored ref to the Watch below, so we retain
  // a reference on the stack.
  scoped_refptr<Watch> watch;
  {
    base::AutoLock lock(lock_);
    if (closed_)
      return MOJO_RESULT_INVALID_ARGUMENT;
    auto it = watches_.find(context);
    if (it == watches_.end())
      return MOJO_RESULT_NOT_FOUND;
    watch = it->second;
    watches_.erase(it);
  }

  // Mark the watch as cancelled so no further notifications get through.
  watch->Cancel();

  // We remove the watcher ref for this context before updating any more
  // internal watcher state, ensuring that we don't receive further
  // notifications for this context.
  watch->dispatcher()->RemoveWatcherRef(this, context);

  {
    base::AutoLock lock(lock_);
    auto handle_it = watched_handles_.find(watch->dispatcher().get());

    // If another thread races to close this watcher handle,
    // |watched_handles_| may have been cleared by the time we reach this
    // section.
    if (handle_it == watched_handles_.end())
      return MOJO_RESULT_OK;

    ready_watches_.erase(handle_it->second.get());
    watched_handles_.erase(handle_it);
  }

  return MOJO_RESULT_OK;
}

MojoResult WatcherDispatcher::Arm(uint32_t* num_blocking_events,
                                  MojoTrapEvent* blocking_events) {
  base::AutoLock lock(lock_);
  if (num_blocking_events && !blocking_events)
    return MOJO_RESULT_INVALID_ARGUMENT;
  if (closed_)
    return MOJO_RESULT_INVALID_ARGUMENT;

  if (watched_handles_.empty())
    return MOJO_RESULT_NOT_FOUND;

  if (ready_watches_.empty()) {
    // Fast path: No watches are ready to notify, so we're done.
    armed_ = true;
    return MOJO_RESULT_OK;
  }

  if (num_blocking_events) {
    DCHECK_LE(ready_watches_.size(), std::numeric_limits<uint32_t>::max());
    *num_blocking_events = std::min(
        *num_blocking_events, static_cast<uint32_t>(ready_watches_.size()));

    WatchSet::const_iterator next_ready_iter = ready_watches_.begin();
    if (last_watch_to_block_arming_) {
      // Find the next watch to notify in simple round-robin order on the
      // |ready_watches_| map, wrapping around to the beginning if necessary.
      next_ready_iter = ready_watches_.find(
          reinterpret_cast<const Watch*>(last_watch_to_block_arming_));
      if (next_ready_iter != ready_watches_.end())
        ++next_ready_iter;
      if (next_ready_iter == ready_watches_.end())
        next_ready_iter = ready_watches_.begin();
    }
    for (size_t i = 0; i < *num_blocking_events; ++i) {
      const Watch* const watch = *next_ready_iter;
      if (blocking_events[i].struct_size < sizeof(*blocking_events))
        return MOJO_RESULT_INVALID_ARGUMENT;
      blocking_events[i].flags = MOJO_TRAP_EVENT_FLAG_WITHIN_API_CALL;
      blocking_events[i].trigger_context = watch->context();
      blocking_events[i].result = watch->last_known_result();
      blocking_events[i].signals_state = watch->last_known_signals_state();

      // Iterate and wrap around.
      last_watch_to_block_arming_ = reinterpret_cast<uintptr_t>(watch);
      ++next_ready_iter;
      if (next_ready_iter == ready_watches_.end())
        next_ready_iter = ready_watches_.begin();
    }
  }

  return MOJO_RESULT_FAILED_PRECONDITION;
}
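
// Illustrative only: callers are expected to treat the
// MOJO_RESULT_FAILED_PRECONDITION return as "events already pending" rather
// than an error. A plausible retry loop, assuming MojoArmTrap() forwards to
// Arm():
//
//   MojoTrapEvent event;
//   event.struct_size = sizeof(event);
//   uint32_t num_events = 1;
//   while (MojoArmTrap(trap, nullptr, &num_events, &event) ==
//          MOJO_RESULT_FAILED_PRECONDITION) {
//     // Handle |event| synchronously here, then try to arm again.
//   }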

WatcherDispatcher::~WatcherDispatcher() = default;

}  // namespace core
}  // namespace mojo