// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "mojo/core/message_pipe_dispatcher.h"

#include <limits>
#include <memory>

#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/trace_event/trace_event.h"
#include "mojo/core/core.h"
#include "mojo/core/node_controller.h"
#include "mojo/core/ports/event.h"
#include "mojo/core/ports/message_filter.h"
#include "mojo/core/request_context.h"
#include "mojo/core/user_message_impl.h"
#include "mojo/public/cpp/bindings/mojo_buildflags.h"

namespace mojo {
namespace core {

namespace {

#pragma pack(push, 1)

struct SerializedState {
  uint64_t pipe_id;
  int8_t endpoint;
  char padding[7];
};

static_assert(sizeof(SerializedState) % 8 == 0,
              "Invalid SerializedState size.");

#pragma pack(pop)

}  // namespace

// A PortObserver which forwards to a MessagePipeDispatcher. This owns a
// reference to the MPD to ensure it lives as long as the observed port.
class MessagePipeDispatcher::PortObserverThunk
    : public NodeController::PortObserver {
 public:
  explicit PortObserverThunk(scoped_refptr<MessagePipeDispatcher> dispatcher)
      : dispatcher_(dispatcher) {}

  PortObserverThunk(const PortObserverThunk&) = delete;
  PortObserverThunk& operator=(const PortObserverThunk&) = delete;

 private:
  ~PortObserverThunk() override = default;

  // NodeController::PortObserver:
  void OnPortStatusChanged() override { dispatcher_->OnPortStatusChanged(); }

  scoped_refptr<MessagePipeDispatcher> dispatcher_;
};

#if DCHECK_IS_ON()

// A MessageFilter which never matches a message. Used to peek at the size of
// the next available message on a port, for debug logging only.
class PeekSizeMessageFilter : public ports::MessageFilter {
 public:
  PeekSizeMessageFilter() = default;

  PeekSizeMessageFilter(const PeekSizeMessageFilter&) = delete;
  PeekSizeMessageFilter& operator=(const PeekSizeMessageFilter&) = delete;

  ~PeekSizeMessageFilter() override = default;

  // ports::MessageFilter:
  bool Match(const ports::UserMessageEvent& message_event) override {
    const auto* message = message_event.GetMessage<UserMessageImpl>();
    if (message->IsSerialized())
      message_size_ = message->user_payload_size();
    return false;
  }

  size_t message_size() const { return message_size_; }

 private:
  size_t message_size_ = 0;
};

#endif  // DCHECK_IS_ON()

MessagePipeDispatcher::MessagePipeDispatcher(NodeController* node_controller,
                                             const ports::PortRef& port,
                                             uint64_t pipe_id,
                                             int endpoint)
    : node_controller_(node_controller),
      port_(port),
      pipe_id_(pipe_id),
      endpoint_(endpoint),
      watchers_(this) {
  DVLOG(2) << "Creating new MessagePipeDispatcher for port " << port.name()
           << " [pipe_id=" << pipe_id << "; endpoint=" << endpoint << "]";

  node_controller_->SetPortObserver(
      port_, base::MakeRefCounted<PortObserverThunk>(this));
}

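// Fuses this endpoint with |other| by merging their underlying ports. Both
// dispatchers are marked closed and their watchers notified before the merge;
// MergeLocalPorts() always consumes (closes) both ports, so neither dispatcher
// is usable afterwards regardless of the return value.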
bool MessagePipeDispatcher::Fuse(MessagePipeDispatcher* other) {
  node_controller_->SetPortObserver(port_, nullptr);
  node_controller_->SetPortObserver(other->port_, nullptr);

  ports::PortRef port0;
  {
    base::AutoLock lock(signal_lock_);
    port0 = port_;
    port_closed_.Set(true);
    watchers_.NotifyClosed();
  }

  ports::PortRef port1;
  {
    base::AutoLock lock(other->signal_lock_);
    port1 = other->port_;
    other->port_closed_.Set(true);
    other->watchers_.NotifyClosed();
  }

  // Both ports are always closed by this call.
  int rv = node_controller_->MergeLocalPorts(port0, port1);
  return rv == ports::OK;
}

Dispatcher::Type MessagePipeDispatcher::GetType() const {
  return Type::MESSAGE_PIPE;
}

MojoResult MessagePipeDispatcher::Close() {
  base::AutoLock lock(signal_lock_);
  DVLOG(2) << "Closing message pipe " << pipe_id_ << " endpoint " << endpoint_
           << " [port=" << port_.name() << "]";
  return CloseNoLock();
}

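// Hands the message to the ports layer and maps ports error codes onto Mojo
// results. On success, watchers are re-notified because the send may have
// pushed the unacknowledged message count over an unread-message quota.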
MojoResult MessagePipeDispatcher::WriteMessage(
    std::unique_ptr<ports::UserMessageEvent> message) {
  if (port_closed_ || in_transit_)
    return MOJO_RESULT_INVALID_ARGUMENT;

  int rv = node_controller_->SendUserMessage(port_, std::move(message));

  DVLOG(4) << "Sent message on pipe " << pipe_id_ << " endpoint " << endpoint_
           << " [port=" << port_.name() << "; rv=" << rv << "]";

  if (rv != ports::OK) {
    if (rv == ports::ERROR_PORT_UNKNOWN ||
        rv == ports::ERROR_PORT_STATE_UNEXPECTED ||
        rv == ports::ERROR_PORT_CANNOT_SEND_PEER) {
      return MOJO_RESULT_INVALID_ARGUMENT;
    } else if (rv == ports::ERROR_PORT_PEER_CLOSED) {
      return MOJO_RESULT_FAILED_PRECONDITION;
    }

    NOTREACHED();
    return MOJO_RESULT_UNKNOWN;
  }

  // We may need to update anyone watching our signals in case we just exceeded
  // the unread message count quota.
  base::AutoLock lock(signal_lock_);
  watchers_.NotifyState(GetHandleSignalsStateNoLock());
  return MOJO_RESULT_OK;
}

MojoResult MessagePipeDispatcher::ReadMessage(
    std::unique_ptr<ports::UserMessageEvent>* message) {
  // We can't read from a port that's closed or in transit!
  if (port_closed_ || in_transit_)
    return MOJO_RESULT_INVALID_ARGUMENT;

  int rv = node_controller_->node()->GetMessage(port_, message, nullptr);
  if (rv != ports::OK && rv != ports::ERROR_PORT_PEER_CLOSED) {
    if (rv == ports::ERROR_PORT_UNKNOWN ||
        rv == ports::ERROR_PORT_STATE_UNEXPECTED)
      return MOJO_RESULT_INVALID_ARGUMENT;

    NOTREACHED();
    return MOJO_RESULT_UNKNOWN;
  }

  if (!*message) {
    // No message was available in the queue.
    if (rv == ports::OK)
      return MOJO_RESULT_SHOULD_WAIT;

    // Peer is closed and there are no more messages to read.
    DCHECK_EQ(rv, ports::ERROR_PORT_PEER_CLOSED);
    return MOJO_RESULT_FAILED_PRECONDITION;
  }

  // We may need to update anyone watching our signals in case we just read the
  // last available message.
  base::AutoLock lock(signal_lock_);
  watchers_.NotifyState(GetHandleSignalsStateNoLock());
  return MOJO_RESULT_OK;
}

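// Installs or clears one of the per-pipe quota limits. For the unread-message
// quota this also (re)configures the ports layer's acknowledge request
// interval, which is how this endpoint learns how many of its sent messages
// the peer has not yet read.
//
// Illustrative usage only (callers normally reach this through the public C
// API rather than the dispatcher directly):
//   MojoSetQuota(pipe_handle, MOJO_QUOTA_TYPE_RECEIVE_QUEUE_LENGTH, 64,
//                nullptr);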
MojoResult MessagePipeDispatcher::SetQuota(MojoQuotaType type, uint64_t limit) {
  absl::optional<uint64_t> new_ack_request_interval;
  {
    base::AutoLock lock(signal_lock_);
    switch (type) {
      case MOJO_QUOTA_TYPE_RECEIVE_QUEUE_LENGTH:
        if (limit == MOJO_QUOTA_LIMIT_NONE)
          receive_queue_length_limit_.reset();
        else
          receive_queue_length_limit_ = limit;
        break;

      case MOJO_QUOTA_TYPE_RECEIVE_QUEUE_MEMORY_SIZE:
        if (limit == MOJO_QUOTA_LIMIT_NONE)
          receive_queue_memory_size_limit_.reset();
        else
          receive_queue_memory_size_limit_ = limit;
        break;

      case MOJO_QUOTA_TYPE_UNREAD_MESSAGE_COUNT:
        if (limit == MOJO_QUOTA_LIMIT_NONE) {
          unread_message_count_limit_.reset();
          new_ack_request_interval = 0;
        } else {
          unread_message_count_limit_ = limit;
          // Setting the acknowledge request interval for the port to half the
          // unread quota limit means the ack round trip has half the window to
          // catch up with sent messages. In other words, if the producer sends
          // messages at a steady rate of limit/2 messages per round trip or
          // lower, and the consumer drains them at the same rate, the quota
          // limit won't be exceeded.
          new_ack_request_interval = (limit + 1) / 2;
        }
        break;

      default:
        return MOJO_RESULT_INVALID_ARGUMENT;
    }
  }

  if (new_ack_request_interval.has_value()) {
    // NOTE: It is not safe to call SetAcknowledgeRequestInterval() while
    // holding `signal_lock_`, as it may re-enter this object when the peer is
    // in the same process.
    node_controller_->node()->SetAcknowledgeRequestInterval(
        port_, *new_ack_request_interval);
  }

  return MOJO_RESULT_OK;
}

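// Reports the currently configured limit (or MOJO_QUOTA_LIMIT_NONE) for the
// given quota type, together with the live usage figure taken from the port's
// current status.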
MojoResult MessagePipeDispatcher::QueryQuota(MojoQuotaType type,
                                             uint64_t* limit,
                                             uint64_t* usage) {
  base::AutoLock lock(signal_lock_);
  ports::PortStatus port_status;
  if (node_controller_->node()->GetStatus(port_, &port_status) != ports::OK) {
    CHECK(in_transit_ || port_transferred_ || port_closed_);
    return MOJO_RESULT_INVALID_ARGUMENT;
  }

  switch (type) {
    case MOJO_QUOTA_TYPE_RECEIVE_QUEUE_LENGTH:
      *limit = receive_queue_length_limit_.value_or(MOJO_QUOTA_LIMIT_NONE);
      *usage = port_status.queued_message_count;
      break;

    case MOJO_QUOTA_TYPE_RECEIVE_QUEUE_MEMORY_SIZE:
      *limit = receive_queue_memory_size_limit_.value_or(MOJO_QUOTA_LIMIT_NONE);
      *usage = port_status.queued_num_bytes;
      break;

    case MOJO_QUOTA_TYPE_UNREAD_MESSAGE_COUNT:
      *limit = unread_message_count_limit_.value_or(MOJO_QUOTA_LIMIT_NONE);
      *usage = port_status.unacknowledged_message_count;
      break;

    default:
      return MOJO_RESULT_INVALID_ARGUMENT;
  }

  return MOJO_RESULT_OK;
}

HandleSignalsState MessagePipeDispatcher::GetHandleSignalsState() const {
  base::AutoLock lock(signal_lock_);
  return GetHandleSignalsStateNoLock();
}

MojoResult MessagePipeDispatcher::AddWatcherRef(
    const scoped_refptr<WatcherDispatcher>& watcher,
    uintptr_t context) {
  base::AutoLock lock(signal_lock_);
  if (port_closed_ || in_transit_)
    return MOJO_RESULT_INVALID_ARGUMENT;
  return watchers_.Add(watcher, context, GetHandleSignalsStateNoLock());
}

MojoResult MessagePipeDispatcher::RemoveWatcherRef(WatcherDispatcher* watcher,
                                                   uintptr_t context) {
  base::AutoLock lock(signal_lock_);
  if (port_closed_ || in_transit_)
    return MOJO_RESULT_INVALID_ARGUMENT;
  return watchers_.Remove(watcher, context);
}

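// Serialization: a message pipe dispatcher is encoded as a SerializedState
// blob (pipe id, endpoint index, and zeroed padding), plus exactly one
// attached port and no platform handles.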
void MessagePipeDispatcher::StartSerialize(uint32_t* num_bytes,
                                           uint32_t* num_ports,
                                           uint32_t* num_handles) {
  *num_bytes = static_cast<uint32_t>(sizeof(SerializedState));
  *num_ports = 1;
  *num_handles = 0;
}

bool MessagePipeDispatcher::EndSerialize(void* destination,
                                         ports::PortName* ports,
                                         PlatformHandle* handles) {
  SerializedState* state = static_cast<SerializedState*>(destination);
  state->pipe_id = pipe_id_;
  state->endpoint = static_cast<int8_t>(endpoint_);
  memset(state->padding, 0, sizeof(state->padding));
  ports[0] = port_.name();
  return true;
}

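// Transit lifecycle: BeginTransit() marks the dispatcher as in transit (and
// fails if it is already in transit or closed), CompleteTransitAndClose()
// finalizes the transfer and closes this end without closing the underlying
// port, and CancelTransit() returns the dispatcher to normal service.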
bool MessagePipeDispatcher::BeginTransit() {
  base::AutoLock lock(signal_lock_);
  if (in_transit_ || port_closed_)
    return false;
  in_transit_.Set(true);
  return in_transit_;
}

void MessagePipeDispatcher::CompleteTransitAndClose() {
  node_controller_->SetPortObserver(port_, nullptr);

  base::AutoLock lock(signal_lock_);
  port_transferred_ = true;
  in_transit_.Set(false);
  CloseNoLock();
}

void MessagePipeDispatcher::CancelTransit() {
  base::AutoLock lock(signal_lock_);
  in_transit_.Set(false);

  // Something may have happened while we were waiting for potential transit.
  watchers_.NotifyState(GetHandleSignalsStateNoLock());
}

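// Rebuilds a dispatcher on the receiving side from the state produced by
// EndSerialize(). The payload must contain exactly one port, zero handles,
// and a SerializedState-sized blob, and the named port must be known to the
// local node; otherwise deserialization fails and returns null.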
// static
scoped_refptr<Dispatcher> MessagePipeDispatcher::Deserialize(
    const void* data,
    size_t num_bytes,
    const ports::PortName* ports,
    size_t num_ports,
    PlatformHandle* handles,
    size_t num_handles) {
  if (num_ports != 1 || num_handles || num_bytes != sizeof(SerializedState)) {
    AssertNotExtractingHandlesFromMessage();
    return nullptr;
  }

  const SerializedState* state = static_cast<const SerializedState*>(data);
  ports::Node* node = Core::Get()->GetNodeController()->node();
  ports::PortRef port;
  if (node->GetPort(ports[0], &port) != ports::OK) {
    AssertNotExtractingHandlesFromMessage();
    return nullptr;
  }

  ports::PortStatus status;
  if (node->GetStatus(port, &status) != ports::OK) {
    AssertNotExtractingHandlesFromMessage();
    return nullptr;
  }

  return new MessagePipeDispatcher(Core::Get()->GetNodeController(), port,
                                   state->pipe_id, state->endpoint);
}

MessagePipeDispatcher::~MessagePipeDispatcher() = default;

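// Marks the endpoint closed and notifies watchers. `signal_lock_` is dropped
// around ClosePort(), which may re-enter dispatcher code when the peer lives
// in the same process. If the port was transferred to another node, it is not
// closed here.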
MojoResult MessagePipeDispatcher::CloseNoLock() {
  signal_lock_.AssertAcquired();
  if (port_closed_ || in_transit_)
    return MOJO_RESULT_INVALID_ARGUMENT;

  port_closed_.Set(true);
  watchers_.NotifyClosed();

  if (!port_transferred_) {
    base::AutoUnlock unlock(signal_lock_);
    node_controller_->ClosePort(port_);

#if BUILDFLAG(MOJO_TRACE_ENABLED)
    TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("mojom"),
                           "MessagePipe closing", pipe_id_ + endpoint_,
                           TRACE_EVENT_FLAG_FLOW_OUT);
#endif
  }

  return MOJO_RESULT_OK;
}

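// Derives the handle's signal state from the current port status: readability
// from queued messages, writability and PEER_REMOTE from peer liveness, and
// QUOTA_EXCEEDED from whichever configured quota limit is currently exceeded.
// If the port is gone (closed, in transit, or transferred), an empty state is
// returned.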
HandleSignalsState MessagePipeDispatcher::GetHandleSignalsStateNoLock() const {
  HandleSignalsState rv;

  ports::PortStatus port_status;
  if (node_controller_->node()->GetStatus(port_, &port_status) != ports::OK) {
    CHECK(in_transit_ || port_transferred_ || port_closed_);
    return HandleSignalsState();
  }

  if (port_status.has_messages) {
    rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_READABLE;
    rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
  }
  if (port_status.receiving_messages)
    rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
  if (!port_status.peer_closed) {
    rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_WRITABLE;
    rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_WRITABLE;
    rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_READABLE;
    rv.satisfiable_signals |= MOJO_HANDLE_SIGNAL_PEER_REMOTE;
    if (port_status.peer_remote)
      rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_PEER_REMOTE;
  } else {
    rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_PEER_CLOSED;
  }

  if (receive_queue_length_limit_ &&
      port_status.queued_message_count > *receive_queue_length_limit_) {
    rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_QUOTA_EXCEEDED;
  } else if (receive_queue_memory_size_limit_ &&
             port_status.queued_num_bytes > *receive_queue_memory_size_limit_) {
    rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_QUOTA_EXCEEDED;
  } else if (unread_message_count_limit_ &&
             port_status.unacknowledged_message_count >
                 *unread_message_count_limit_) {
    rv.satisfied_signals |= MOJO_HANDLE_SIGNAL_QUOTA_EXCEEDED;
  }

  rv.satisfiable_signals |=
      MOJO_HANDLE_SIGNAL_PEER_CLOSED | MOJO_HANDLE_SIGNAL_QUOTA_EXCEEDED;

#if BUILDFLAG(MOJO_TRACE_ENABLED)
  const bool was_peer_closed =
      last_known_satisfied_signals_ & MOJO_HANDLE_SIGNAL_PEER_CLOSED;
  const bool is_peer_closed =
      rv.satisfied_signals & MOJO_HANDLE_SIGNAL_PEER_CLOSED;
  if (is_peer_closed && !was_peer_closed) {
    TRACE_EVENT_WITH_FLOW0(
        TRACE_DISABLED_BY_DEFAULT("mojom"), "MessagePipe peer closed",
        pipe_id_ + (1 - endpoint_), TRACE_EVENT_FLAG_FLOW_IN);
  }
#endif

  last_known_satisfied_signals_ = rv.satisfied_signals;

  return rv;
}

void MessagePipeDispatcher::OnPortStatusChanged() {
  DCHECK(RequestContext::current());

  base::AutoLock lock(signal_lock_);

  // We stop observing our port as soon as it's transferred, but this can race
  // with events which are raised right before that happens. This is fine to
  // ignore.
  if (port_transferred_)
    return;

#if DCHECK_IS_ON()
  ports::PortStatus port_status;
  if (node_controller_->node()->GetStatus(port_, &port_status) == ports::OK) {
    if (port_status.has_messages) {
      std::unique_ptr<ports::UserMessageEvent> unused;
      PeekSizeMessageFilter filter;
      node_controller_->node()->GetMessage(port_, &unused, &filter);
      DVLOG(4) << "New message detected on message pipe " << pipe_id_
               << " endpoint " << endpoint_ << " [port=" << port_.name()
               << "; size=" << filter.message_size() << "]";
    }
    if (port_status.peer_closed) {
      DVLOG(2) << "Peer closure detected on message pipe " << pipe_id_
               << " endpoint " << endpoint_ << " [port=" << port_.name() << "]";
    }
  }
#endif

  watchers_.NotifyState(GetHandleSignalsStateNoLock());
}

}  // namespace core
}  // namespace mojo