// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ipc/ipc_mojo_bootstrap.h"

#include <inttypes.h>
#include <stdint.h>

#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>

#include "base/bind.h"
#include "base/callback.h"
#include "base/check_op.h"
#include "base/containers/contains.h"
#include "base/containers/queue.h"
#include "base/hash/hash.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/no_destructor.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/typed_macros.h"
#include "ipc/ipc_channel.h"
#include "mojo/public/cpp/bindings/associated_group.h"
#include "mojo/public/cpp/bindings/associated_group_controller.h"
#include "mojo/public/cpp/bindings/connector.h"
#include "mojo/public/cpp/bindings/interface_endpoint_client.h"
#include "mojo/public/cpp/bindings/interface_endpoint_controller.h"
#include "mojo/public/cpp/bindings/interface_id.h"
#include "mojo/public/cpp/bindings/message.h"
#include "mojo/public/cpp/bindings/message_header_validator.h"
#include "mojo/public/cpp/bindings/mojo_buildflags.h"
#include "mojo/public/cpp/bindings/pipe_control_message_handler.h"
#include "mojo/public/cpp/bindings/pipe_control_message_handler_delegate.h"
#include "mojo/public/cpp/bindings/pipe_control_message_proxy.h"
#include "mojo/public/cpp/bindings/sequence_local_sync_event_watcher.h"
#include "mojo/public/cpp/bindings/tracing_helpers.h"

namespace IPC {
namespace {

class ChannelAssociatedGroupController;

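// Thread-local flag set by ScopedAllowOffSequenceChannelAssociatedBindings
// (defined at the bottom of this file). While the flag is set, endpoints
// attached on the current sequence are recorded as "bound off sequence" and
// keep dispatching on their own task runner instead of the proxy task runner.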
base::ThreadLocalBoolean& GetOffSequenceBindingAllowedFlag() {
  static base::NoDestructor<base::ThreadLocalBoolean> flag;
  return *flag;
}

bool CanBindOffSequence() {
  return GetOffSequenceBindingAllowedFlag().Get();
}

// Used to track some internal Channel state in pursuit of message leaks.
//
// TODO(https://crbug.com/813045): Remove this.
class ControllerMemoryDumpProvider
    : public base::trace_event::MemoryDumpProvider {
 public:
  ControllerMemoryDumpProvider() {
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, "IPCChannel", nullptr);
  }

  ControllerMemoryDumpProvider(const ControllerMemoryDumpProvider&) = delete;
  ControllerMemoryDumpProvider& operator=(const ControllerMemoryDumpProvider&) =
      delete;

  ~ControllerMemoryDumpProvider() override {
    base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
        this);
  }

  void AddController(ChannelAssociatedGroupController* controller) {
    base::AutoLock lock(lock_);
    controllers_.insert(controller);
  }

  void RemoveController(ChannelAssociatedGroupController* controller) {
    base::AutoLock lock(lock_);
    controllers_.erase(controller);
  }

  // base::trace_event::MemoryDumpProvider:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override;

 private:
  base::Lock lock_;
  std::set<ChannelAssociatedGroupController*> controllers_;
};

ControllerMemoryDumpProvider& GetMemoryDumpProvider() {
  static base::NoDestructor<ControllerMemoryDumpProvider> provider;
  return *provider;
}

// Messages are grouped by this info when recording memory metrics.
struct MessageMemoryDumpInfo {
  MessageMemoryDumpInfo(const mojo::Message& message)
      : id(message.name()), profiler_tag(message.heap_profiler_tag()) {}
  MessageMemoryDumpInfo() = default;

  bool operator==(const MessageMemoryDumpInfo& other) const {
    return other.id == id && other.profiler_tag == profiler_tag;
  }

  uint32_t id = 0;
  const char* profiler_tag = nullptr;
};

struct MessageMemoryDumpInfoHash {
  size_t operator()(const MessageMemoryDumpInfo& info) const {
    return base::HashInts(
        info.id, info.profiler_tag ? base::FastHash(info.profiler_tag) : 0);
  }
};

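// The AssociatedGroupController implementation backing an IPC Channel pipe.
// It multiplexes every Channel-associated interface endpoint over the single
// message pipe bound in Bind(), routing incoming messages (Accept()) to the
// right endpoint and task runner, and funneling all outgoing messages through
// SendMessage() on the IPC (primary) thread.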
class ChannelAssociatedGroupController
    : public mojo::AssociatedGroupController,
      public mojo::MessageReceiver,
      public mojo::PipeControlMessageHandlerDelegate {
 public:
  ChannelAssociatedGroupController(
      bool set_interface_id_namespace_bit,
      const scoped_refptr<base::SingleThreadTaskRunner>& ipc_task_runner,
      const scoped_refptr<base::SingleThreadTaskRunner>& proxy_task_runner,
      const scoped_refptr<mojo::internal::MessageQuotaChecker>& quota_checker)
      : task_runner_(ipc_task_runner),
        proxy_task_runner_(proxy_task_runner),
        quota_checker_(quota_checker),
        set_interface_id_namespace_bit_(set_interface_id_namespace_bit),
        dispatcher_(this),
        control_message_handler_(this),
        control_message_proxy_thunk_(this),
        control_message_proxy_(&control_message_proxy_thunk_) {
    thread_checker_.DetachFromThread();
    control_message_handler_.SetDescription(
        "IPC::mojom::Bootstrap [primary] PipeControlMessageHandler");
    dispatcher_.SetValidator(std::make_unique<mojo::MessageHeaderValidator>(
        "IPC::mojom::Bootstrap [primary] MessageHeaderValidator"));

    GetMemoryDumpProvider().AddController(this);
  }

  ChannelAssociatedGroupController(const ChannelAssociatedGroupController&) =
      delete;
  ChannelAssociatedGroupController& operator=(
      const ChannelAssociatedGroupController&) = delete;

  size_t GetQueuedMessageCount() {
    base::AutoLock lock(outgoing_messages_lock_);
    return outgoing_messages_.size();
  }

  void GetTopQueuedMessageMemoryDumpInfo(MessageMemoryDumpInfo* info,
                                         size_t* count) {
    std::unordered_map<MessageMemoryDumpInfo, size_t, MessageMemoryDumpInfoHash>
        counts;
    std::pair<MessageMemoryDumpInfo, size_t> top_message_info_and_count = {
        MessageMemoryDumpInfo(), 0};
    base::AutoLock lock(outgoing_messages_lock_);
    for (const auto& message : outgoing_messages_) {
      auto it_and_inserted = counts.emplace(MessageMemoryDumpInfo(message), 0);
      it_and_inserted.first->second++;
      if (it_and_inserted.first->second > top_message_info_and_count.second)
        top_message_info_and_count = *it_and_inserted.first;
    }
    *info = top_message_info_and_count.first;
    *count = top_message_info_and_count.second;
  }

  void Pause() {
    DCHECK(!paused_);
    paused_ = true;
  }

  void Unpause() {
    DCHECK(paused_);
    paused_ = false;
  }

  void FlushOutgoingMessages() {
    std::vector<mojo::Message> outgoing_messages;
    {
      base::AutoLock lock(outgoing_messages_lock_);
      std::swap(outgoing_messages, outgoing_messages_);
    }
    if (quota_checker_ && outgoing_messages.size())
      quota_checker_->AfterMessagesDequeued(outgoing_messages.size());

    for (auto& message : outgoing_messages)
      SendMessage(&message);
  }

  void Bind(mojo::ScopedMessagePipeHandle handle,
            mojo::PendingAssociatedRemote<mojom::Channel>* sender,
            mojo::PendingAssociatedReceiver<mojom::Channel>* receiver) {
    connector_ = std::make_unique<mojo::Connector>(
        std::move(handle), mojo::Connector::SINGLE_THREADED_SEND,
        "IPC Channel");
    connector_->set_incoming_receiver(&dispatcher_);
    connector_->set_connection_error_handler(
        base::BindOnce(&ChannelAssociatedGroupController::OnPipeError,
                       base::Unretained(this)));
    connector_->set_enforce_errors_from_incoming_receiver(false);
    if (quota_checker_)
      connector_->SetMessageQuotaChecker(quota_checker_);

    // Don't let the Connector do any sort of queuing on our behalf. Individual
    // messages bound for the IPC::ChannelProxy thread (i.e. the vast majority
    // of messages received by this Connector) are already individually
    // scheduled for dispatch by ChannelProxy, so Connector's normal mode of
    // operation would only introduce a redundant scheduling step for most
    // messages.
    connector_->set_force_immediate_dispatch(true);

    mojo::InterfaceId sender_id, receiver_id;
    if (set_interface_id_namespace_bit_) {
      sender_id = 1 | mojo::kInterfaceIdNamespaceMask;
      receiver_id = 1;
    } else {
      sender_id = 1;
      receiver_id = 1 | mojo::kInterfaceIdNamespaceMask;
    }

    {
      base::AutoLock locker(lock_);
      Endpoint* sender_endpoint = new Endpoint(this, sender_id);
      Endpoint* receiver_endpoint = new Endpoint(this, receiver_id);
      endpoints_.insert({sender_id, sender_endpoint});
      endpoints_.insert({receiver_id, receiver_endpoint});
      sender_endpoint->set_handle_created();
      receiver_endpoint->set_handle_created();
    }

    mojo::ScopedInterfaceEndpointHandle sender_handle =
        CreateScopedInterfaceEndpointHandle(sender_id);
    mojo::ScopedInterfaceEndpointHandle receiver_handle =
        CreateScopedInterfaceEndpointHandle(receiver_id);

    *sender = mojo::PendingAssociatedRemote<mojom::Channel>(
        std::move(sender_handle), 0);
    *receiver = mojo::PendingAssociatedReceiver<mojom::Channel>(
        std::move(receiver_handle));
  }

  void StartReceiving() { connector_->StartReceiving(task_runner_); }

  void ShutDown() {
    DCHECK(thread_checker_.CalledOnValidThread());
    shut_down_ = true;
    if (connector_)
      connector_->CloseMessagePipe();
    OnPipeError();
    connector_.reset();

    base::AutoLock lock(outgoing_messages_lock_);
    if (quota_checker_ && outgoing_messages_.size())
      quota_checker_->AfterMessagesDequeued(outgoing_messages_.size());
    outgoing_messages_.clear();
  }

  // mojo::AssociatedGroupController:
  mojo::InterfaceId AssociateInterface(
      mojo::ScopedInterfaceEndpointHandle handle_to_send) override {
    if (!handle_to_send.pending_association())
      return mojo::kInvalidInterfaceId;

    uint32_t id = 0;
    {
      base::AutoLock locker(lock_);
      do {
        if (next_interface_id_ >= mojo::kInterfaceIdNamespaceMask)
          next_interface_id_ = 2;
        id = next_interface_id_++;
        if (set_interface_id_namespace_bit_)
          id |= mojo::kInterfaceIdNamespaceMask;
      } while (base::Contains(endpoints_, id));

      Endpoint* endpoint = new Endpoint(this, id);
      if (encountered_error_)
        endpoint->set_peer_closed();
      endpoint->set_handle_created();
      endpoints_.insert({id, endpoint});
    }

    if (!NotifyAssociation(&handle_to_send, id)) {
      // The peer handle of |handle_to_send|, which is supposed to join this
      // associated group, has been closed.
      {
        base::AutoLock locker(lock_);
        Endpoint* endpoint = FindEndpoint(id);
        if (endpoint)
          MarkClosedAndMaybeRemove(endpoint);
      }

      control_message_proxy_.NotifyPeerEndpointClosed(
          id, handle_to_send.disconnect_reason());
    }
    return id;
  }

  mojo::ScopedInterfaceEndpointHandle CreateLocalEndpointHandle(
      mojo::InterfaceId id) override {
    if (!mojo::IsValidInterfaceId(id))
      return mojo::ScopedInterfaceEndpointHandle();

    // Unless it is the primary ID, |id| is from the remote side and therefore
    // its namespace bit is supposed to be different than the value that this
    // router would use.
    if (!mojo::IsPrimaryInterfaceId(id) &&
        set_interface_id_namespace_bit_ ==
            mojo::HasInterfaceIdNamespaceBitSet(id)) {
      return mojo::ScopedInterfaceEndpointHandle();
    }

    base::AutoLock locker(lock_);
    bool inserted = false;
    Endpoint* endpoint = FindOrInsertEndpoint(id, &inserted);
    if (inserted) {
      DCHECK(!endpoint->handle_created());
      if (encountered_error_)
        endpoint->set_peer_closed();
    } else {
      if (endpoint->handle_created())
        return mojo::ScopedInterfaceEndpointHandle();
    }

    endpoint->set_handle_created();
    return CreateScopedInterfaceEndpointHandle(id);
  }

  void CloseEndpointHandle(
      mojo::InterfaceId id,
      const absl::optional<mojo::DisconnectReason>& reason) override {
    if (!mojo::IsValidInterfaceId(id))
      return;
    {
      base::AutoLock locker(lock_);
      DCHECK(base::Contains(endpoints_, id));
      Endpoint* endpoint = endpoints_[id].get();
      DCHECK(!endpoint->client());
      DCHECK(!endpoint->closed());
      MarkClosedAndMaybeRemove(endpoint);
    }

    if (!mojo::IsPrimaryInterfaceId(id) || reason)
      control_message_proxy_.NotifyPeerEndpointClosed(id, reason);
  }

  mojo::InterfaceEndpointController* AttachEndpointClient(
      const mojo::ScopedInterfaceEndpointHandle& handle,
      mojo::InterfaceEndpointClient* client,
      scoped_refptr<base::SequencedTaskRunner> runner) override {
    const mojo::InterfaceId id = handle.id();

    DCHECK(mojo::IsValidInterfaceId(id));
    DCHECK(client);

    base::AutoLock locker(lock_);
    DCHECK(base::Contains(endpoints_, id));

    Endpoint* endpoint = endpoints_[id].get();
    endpoint->AttachClient(client, std::move(runner));

    if (endpoint->peer_closed())
      NotifyEndpointOfError(endpoint, true /* force_async */);

    return endpoint;
  }

  void DetachEndpointClient(
      const mojo::ScopedInterfaceEndpointHandle& handle) override {
    const mojo::InterfaceId id = handle.id();

    DCHECK(mojo::IsValidInterfaceId(id));

    base::AutoLock locker(lock_);
    DCHECK(base::Contains(endpoints_, id));

    Endpoint* endpoint = endpoints_[id].get();
    endpoint->DetachClient();
  }

  void RaiseError() override {
    // We ignore errors on channel endpoints, leaving the pipe open. There are
    // good reasons for this:
    //
    //   * We should never close a channel endpoint in either process as long as
    //     the child process is still alive. The child's endpoint should only be
    //     closed implicitly by process death, and the browser's endpoint should
    //     only be closed after the child process is confirmed to be dead. Crash
    //     reporting logic in Chrome relies on this behavior in order to do the
    //     right thing.
    //
    //   * There are two interesting conditions under which RaiseError() can be
    //     implicitly reached: an incoming message fails validation, or the
    //     local endpoint drops a response callback without calling it.
    //
    //   * In the validation case, we also report the message as bad, and this
    //     will imminently trigger the common bad-IPC path in the browser,
    //     causing the browser to kill the offending renderer.
    //
    //   * In the dropped response callback case, the net result of ignoring the
    //     issue is generally innocuous. While indicative of programmer error,
    //     it's not a severe failure and is already covered by separate DCHECKs.
    //
    // See https://crbug.com/861607 for additional discussion.
  }

  bool PrefersSerializedMessages() override { return true; }

 private:
  class Endpoint;
  class ControlMessageProxyThunk;
  friend class Endpoint;
  friend class ControlMessageProxyThunk;

  // MessageWrapper objects are always destroyed under the controller's lock.
  // On destruction, if the message it wraps contains
  // ScopedInterfaceEndpointHandles (which cannot be destructed under the
  // controller's lock), the wrapper unlocks to clean them up.
  class MessageWrapper {
   public:
    MessageWrapper() = default;

    MessageWrapper(ChannelAssociatedGroupController* controller,
                   mojo::Message message)
        : controller_(controller), value_(std::move(message)) {}

    MessageWrapper(MessageWrapper&& other)
        : controller_(other.controller_), value_(std::move(other.value_)) {}

    MessageWrapper(const MessageWrapper&) = delete;
    MessageWrapper& operator=(const MessageWrapper&) = delete;

    ~MessageWrapper() {
      if (value_.associated_endpoint_handles()->empty())
        return;

      controller_->lock_.AssertAcquired();
      {
        base::AutoUnlock unlocker(controller_->lock_);
        value_.mutable_associated_endpoint_handles()->clear();
      }
    }

    MessageWrapper& operator=(MessageWrapper&& other) {
      controller_ = other.controller_;
      value_ = std::move(other.value_);
      return *this;
    }

    mojo::Message& value() { return value_; }

   private:
    raw_ptr<ChannelAssociatedGroupController> controller_ = nullptr;
    mojo::Message value_;
  };

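  // Tracks a single associated interface endpoint registered with this
  // controller. Implements mojo::InterfaceEndpointController so an attached
  // InterfaceEndpointClient can send messages and perform sync waits through
  // it. All mutable state is guarded by the controller's |lock_|.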
  class Endpoint : public base::RefCountedThreadSafe<Endpoint>,
                   public mojo::InterfaceEndpointController {
   public:
    Endpoint(ChannelAssociatedGroupController* controller, mojo::InterfaceId id)
        : controller_(controller), id_(id) {}

    Endpoint(const Endpoint&) = delete;
    Endpoint& operator=(const Endpoint&) = delete;

    mojo::InterfaceId id() const { return id_; }

    bool closed() const {
      controller_->lock_.AssertAcquired();
      return closed_;
    }

    void set_closed() {
      controller_->lock_.AssertAcquired();
      closed_ = true;
    }

    bool peer_closed() const {
      controller_->lock_.AssertAcquired();
      return peer_closed_;
    }

    void set_peer_closed() {
      controller_->lock_.AssertAcquired();
      peer_closed_ = true;
    }

    bool handle_created() const {
      controller_->lock_.AssertAcquired();
      return handle_created_;
    }

    void set_handle_created() {
      controller_->lock_.AssertAcquired();
      handle_created_ = true;
    }

    const absl::optional<mojo::DisconnectReason>& disconnect_reason() const {
      return disconnect_reason_;
    }

    void set_disconnect_reason(
        const absl::optional<mojo::DisconnectReason>& disconnect_reason) {
      disconnect_reason_ = disconnect_reason;
    }

    base::SequencedTaskRunner* task_runner() const {
      return task_runner_.get();
    }

    bool was_bound_off_sequence() const { return was_bound_off_sequence_; }

    mojo::InterfaceEndpointClient* client() const {
      controller_->lock_.AssertAcquired();
      return client_;
    }

    void AttachClient(mojo::InterfaceEndpointClient* client,
                      scoped_refptr<base::SequencedTaskRunner> runner) {
      controller_->lock_.AssertAcquired();
      DCHECK(!client_);
      DCHECK(!closed_);

      task_runner_ = std::move(runner);
      client_ = client;
      if (CanBindOffSequence())
        was_bound_off_sequence_ = true;
    }

    void DetachClient() {
      controller_->lock_.AssertAcquired();
      DCHECK(client_);
      DCHECK(!closed_);

      task_runner_ = nullptr;
      client_ = nullptr;
      sync_watcher_.reset();
    }

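    // Queues an incoming sync message under the controller's lock and returns
    // an id for it. AcceptSyncMessage() later uses that id to pop exactly this
    // message, unless a sync wait on this endpoint already dequeued it.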
    uint32_t EnqueueSyncMessage(MessageWrapper message) {
      controller_->lock_.AssertAcquired();
      uint32_t id = GenerateSyncMessageId();
      sync_messages_.emplace(id, std::move(message));
      SignalSyncMessageEvent();
      return id;
    }

    void SignalSyncMessageEvent() {
      controller_->lock_.AssertAcquired();

      if (sync_watcher_)
        sync_watcher_->SignalEvent();
    }

    MessageWrapper PopSyncMessage(uint32_t id) {
      controller_->lock_.AssertAcquired();
      if (sync_messages_.empty() || sync_messages_.front().first != id)
        return MessageWrapper();
      MessageWrapper message = std::move(sync_messages_.front().second);
      sync_messages_.pop();
      return message;
    }

    // mojo::InterfaceEndpointController:
    bool SendMessage(mojo::Message* message) override {
      DCHECK(task_runner_->RunsTasksInCurrentSequence());
      message->set_interface_id(id_);
      return controller_->SendMessage(message);
    }

    void AllowWokenUpBySyncWatchOnSameThread() override {
      DCHECK(task_runner_->RunsTasksInCurrentSequence());

      EnsureSyncWatcherExists();
      sync_watcher_->AllowWokenUpBySyncWatchOnSameSequence();
    }

    bool SyncWatch(const bool& should_stop) override {
      DCHECK(task_runner_->RunsTasksInCurrentSequence());

      // It's not legal to make sync calls from the primary endpoint's thread,
      // and in fact they must only happen from the proxy task runner.
      DCHECK(!controller_->task_runner_->BelongsToCurrentThread());
      DCHECK(controller_->proxy_task_runner_->BelongsToCurrentThread());

      EnsureSyncWatcherExists();
      return sync_watcher_->SyncWatch(&should_stop);
    }

    bool SyncWatchExclusive(uint64_t request_id) override {
      // We don't support exclusive waits on Channel-associated interfaces.
      NOTREACHED();
      return false;
    }

    void RegisterExternalSyncWaiter(uint64_t request_id) override {}

   private:
    friend class base::RefCountedThreadSafe<Endpoint>;

    ~Endpoint() override {
      controller_->lock_.AssertAcquired();

      DCHECK(!client_);
      DCHECK(closed_);
      DCHECK(peer_closed_);
      DCHECK(!sync_watcher_);
    }

    void OnSyncMessageEventReady() {
      DCHECK(task_runner_->RunsTasksInCurrentSequence());

      scoped_refptr<Endpoint> keepalive(this);
      scoped_refptr<AssociatedGroupController> controller_keepalive(
          controller_.get());
      base::AutoLock locker(controller_->lock_);
      bool more_to_process = false;
      if (!sync_messages_.empty()) {
        MessageWrapper message_wrapper =
            std::move(sync_messages_.front().second);
        sync_messages_.pop();

        bool dispatch_succeeded;
        mojo::InterfaceEndpointClient* client = client_;
        {
          base::AutoUnlock unlocker(controller_->lock_);
          dispatch_succeeded =
              client->HandleIncomingMessage(&message_wrapper.value());
        }

        if (!sync_messages_.empty())
          more_to_process = true;

        if (!dispatch_succeeded)
          controller_->RaiseError();
      }

      if (!more_to_process)
        sync_watcher_->ResetEvent();

      // If there are no queued sync messages and the peer has closed, there
      // won't be incoming sync messages in the future. If any SyncWatch()
      // calls are on the stack for this endpoint, resetting the watcher will
      // allow them to exit as the stack unwinds.
      if (!more_to_process && peer_closed_)
        sync_watcher_.reset();
    }

    void EnsureSyncWatcherExists() {
      DCHECK(task_runner_->RunsTasksInCurrentSequence());
      if (sync_watcher_)
        return;

      base::AutoLock locker(controller_->lock_);
      sync_watcher_ = std::make_unique<mojo::SequenceLocalSyncEventWatcher>(
          base::BindRepeating(&Endpoint::OnSyncMessageEventReady,
                              base::Unretained(this)));
      if (peer_closed_ || !sync_messages_.empty())
        SignalSyncMessageEvent();
    }

    uint32_t GenerateSyncMessageId() {
      // Overflow is fine.
      uint32_t id = next_sync_message_id_++;
      DCHECK(sync_messages_.empty() || sync_messages_.front().first != id);
      return id;
    }

    const raw_ptr<ChannelAssociatedGroupController> controller_;
    const mojo::InterfaceId id_;

    bool closed_ = false;
    bool peer_closed_ = false;
    bool handle_created_ = false;
    bool was_bound_off_sequence_ = false;
    absl::optional<mojo::DisconnectReason> disconnect_reason_;
    raw_ptr<mojo::InterfaceEndpointClient> client_ = nullptr;
    scoped_refptr<base::SequencedTaskRunner> task_runner_;
    std::unique_ptr<mojo::SequenceLocalSyncEventWatcher> sync_watcher_;
    base::queue<std::pair<uint32_t, MessageWrapper>> sync_messages_;
    uint32_t next_sync_message_id_ = 0;
  };

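  // Routes pipe control messages generated by |control_message_proxy_| back
  // into the controller's SendMessage() so they share the normal outgoing
  // path (queueing, quota checking, and thread hopping).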
  class ControlMessageProxyThunk : public MessageReceiver {
   public:
    explicit ControlMessageProxyThunk(
        ChannelAssociatedGroupController* controller)
        : controller_(controller) {}

    ControlMessageProxyThunk(const ControlMessageProxyThunk&) = delete;
    ControlMessageProxyThunk& operator=(const ControlMessageProxyThunk&) =
        delete;

   private:
    // MessageReceiver:
    bool Accept(mojo::Message* message) override {
      return controller_->SendMessage(message);
    }

    raw_ptr<ChannelAssociatedGroupController> controller_;
  };

  ~ChannelAssociatedGroupController() override {
    DCHECK(!connector_);

    base::AutoLock locker(lock_);
    for (auto iter = endpoints_.begin(); iter != endpoints_.end();) {
      Endpoint* endpoint = iter->second.get();
      ++iter;

      if (!endpoint->closed()) {
        // This happens when a NotifyPeerEndpointClosed message has been
        // received, but the interface ID hasn't been used to create a local
        // endpoint handle.
        DCHECK(!endpoint->client());
        DCHECK(endpoint->peer_closed());
        MarkClosedAndMaybeRemove(endpoint);
      } else {
        MarkPeerClosedAndMaybeRemove(endpoint);
      }
    }

    DCHECK(endpoints_.empty());

    GetMemoryDumpProvider().RemoveController(this);
  }

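  // Sends |message| over the Channel pipe. On the IPC thread this either hands
  // the message to the Connector or, if the pipe isn't bound yet or is paused,
  // queues it in |outgoing_messages_| until FlushOutgoingMessages(). Calls from
  // any other thread are posted to the IPC thread first.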
  bool SendMessage(mojo::Message* message) {
    DCHECK(message->heap_profiler_tag());
    if (task_runner_->BelongsToCurrentThread()) {
      DCHECK(thread_checker_.CalledOnValidThread());
      if (!connector_ || paused_) {
        if (!shut_down_) {
          base::AutoLock lock(outgoing_messages_lock_);
          if (quota_checker_)
            quota_checker_->BeforeMessagesEnqueued(1);
          outgoing_messages_.emplace_back(std::move(*message));
        }
        return true;
      }
      return connector_->Accept(message);
    } else {
      // We always post tasks to the primary endpoint thread when called from
      // other threads in order to simulate IPC::ChannelProxy::Send behavior.
      task_runner_->PostTask(
          FROM_HERE,
          base::BindOnce(
              &ChannelAssociatedGroupController::SendMessageOnPrimaryThread,
              this, std::move(*message)));
      return true;
    }
  }

  void SendMessageOnPrimaryThread(mojo::Message message) {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (!SendMessage(&message))
      RaiseError();
  }

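  // Invoked on the IPC thread when the Channel pipe is closed or encounters an
  // error. Marks every endpoint as peer-closed and notifies any endpoints with
  // attached clients so their error handlers run.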
  void OnPipeError() {
    DCHECK(thread_checker_.CalledOnValidThread());

    // We keep |this| alive here because it's possible for the notifications
    // below to release all other references.
    scoped_refptr<ChannelAssociatedGroupController> keepalive(this);

    base::AutoLock locker(lock_);
    encountered_error_ = true;

    std::vector<scoped_refptr<Endpoint>> endpoints_to_notify;
    for (auto iter = endpoints_.begin(); iter != endpoints_.end();) {
      Endpoint* endpoint = iter->second.get();
      ++iter;

      if (endpoint->client())
        endpoints_to_notify.push_back(endpoint);

      MarkPeerClosedAndMaybeRemove(endpoint);
    }

    for (auto& endpoint : endpoints_to_notify) {
      // Because a notification may in turn detach any endpoint, we have to
      // check each client again here.
      if (endpoint->client())
        NotifyEndpointOfError(endpoint.get(), false /* force_async */);
    }
  }

  void NotifyEndpointOfError(Endpoint* endpoint, bool force_async) {
    lock_.AssertAcquired();
    DCHECK(endpoint->task_runner() && endpoint->client());
    if (endpoint->task_runner()->RunsTasksInCurrentSequence() && !force_async) {
      mojo::InterfaceEndpointClient* client = endpoint->client();
      absl::optional<mojo::DisconnectReason> reason(
          endpoint->disconnect_reason());

      base::AutoUnlock unlocker(lock_);
      client->NotifyError(reason);
    } else {
      endpoint->task_runner()->PostTask(
          FROM_HERE,
          base::BindOnce(&ChannelAssociatedGroupController::
                             NotifyEndpointOfErrorOnEndpointThread,
                         this, endpoint->id(), base::Unretained(endpoint)));
    }
  }

  void NotifyEndpointOfErrorOnEndpointThread(mojo::InterfaceId id,
                                             Endpoint* endpoint) {
    base::AutoLock locker(lock_);
    auto iter = endpoints_.find(id);
    if (iter == endpoints_.end() || iter->second.get() != endpoint)
      return;
    if (!endpoint->client())
      return;

    DCHECK(endpoint->task_runner()->RunsTasksInCurrentSequence());
    NotifyEndpointOfError(endpoint, false /* force_async */);
  }

  void MarkClosedAndMaybeRemove(Endpoint* endpoint) {
    lock_.AssertAcquired();
    endpoint->set_closed();
    if (endpoint->closed() && endpoint->peer_closed())
      endpoints_.erase(endpoint->id());
  }

  void MarkPeerClosedAndMaybeRemove(Endpoint* endpoint) {
    lock_.AssertAcquired();
    endpoint->set_peer_closed();
    endpoint->SignalSyncMessageEvent();
    if (endpoint->closed() && endpoint->peer_closed())
      endpoints_.erase(endpoint->id());
  }

  Endpoint* FindOrInsertEndpoint(mojo::InterfaceId id, bool* inserted) {
    lock_.AssertAcquired();
    DCHECK(!inserted || !*inserted);

    Endpoint* endpoint = FindEndpoint(id);
    if (!endpoint) {
      endpoint = new Endpoint(this, id);
      endpoints_.insert({id, endpoint});
      if (inserted)
        *inserted = true;
    }
    return endpoint;
  }

  Endpoint* FindEndpoint(mojo::InterfaceId id) {
    lock_.AssertAcquired();
    auto iter = endpoints_.find(id);
    return iter != endpoints_.end() ? iter->second.get() : nullptr;
  }

  // mojo::MessageReceiver:
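  // Entry point for every message arriving on the Channel pipe; invoked on the
  // IPC thread by |dispatcher_| via the Connector. Pipe control messages go to
  // |control_message_handler_|; sync messages are queued on their endpoint and
  // scheduled via AcceptSyncMessage(); everything else is either dispatched
  // inline (when the target is bound to this sequence) or posted to the
  // appropriate task runner via AcceptOnEndpointThread().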
  bool Accept(mojo::Message* message) override {
    DCHECK(thread_checker_.CalledOnValidThread());

    if (!message->DeserializeAssociatedEndpointHandles(this))
      return false;

    if (mojo::PipeControlMessageHandler::IsPipeControlMessage(message))
      return control_message_handler_.Accept(message);

    mojo::InterfaceId id = message->interface_id();
    if (!mojo::IsValidInterfaceId(id))
      return false;

    base::ReleasableAutoLock locker(&lock_);
    Endpoint* endpoint = FindEndpoint(id);
    if (!endpoint)
      return true;

    mojo::InterfaceEndpointClient* client = endpoint->client();
    if (!client || !endpoint->task_runner()->RunsTasksInCurrentSequence()) {
      // The ChannelProxy for this channel is bound to `proxy_task_runner_` and
      // by default legacy IPCs must dispatch to either the IO thread or the
      // proxy task runner. We generally impose the same constraint on
      // associated interface endpoints so that FIFO can be guaranteed across
      // all interfaces without stalling any of them to wait for a pending
      // endpoint to be bound.
      //
      // This allows us to assume that if an endpoint is not yet bound when we
      // receive a message targeting it, it *will* be bound on the proxy task
      // runner by the time a newly posted task runs there. Hence we simply post
      // a hopeful dispatch task to that task runner.
      //
      // As it turns out, there are even some instances of endpoints binding to
      // alternative (non-IO-thread, non-proxy) task runners, but still
      // ultimately relying on the fact that we schedule their messages on the
      // proxy task runner. So even if the endpoint is already bound, we
      // default to scheduling it on the proxy task runner as long as it's not
      // bound specifically to the IO task runner.
      // TODO(rockot): Try to sort out these cases and maybe eliminate them.
      //
      // Finally, it's also possible that an endpoint was bound to an
      // alternative task runner and it really does want its messages to
      // dispatch there. In that case `was_bound_off_sequence()` will be true to
      // signal that we should really use that task runner.
      const scoped_refptr<base::SequencedTaskRunner> task_runner =
          client && endpoint->was_bound_off_sequence()
              ? endpoint->task_runner()
              : proxy_task_runner_.get();

      if (message->has_flag(mojo::Message::kFlagIsSync)) {
        MessageWrapper message_wrapper(this, std::move(*message));

        // Sync messages may need to be handled by the endpoint if it's blocking
        // on a sync reply. We pass ownership of the message to the endpoint's
        // sync message queue. If the endpoint was blocking, it will dequeue the
        // message and dispatch it. Otherwise the posted |AcceptSyncMessage()|
        // call will dequeue the message and dispatch it.
        uint32_t message_id =
            endpoint->EnqueueSyncMessage(std::move(message_wrapper));
        task_runner->PostTask(
            FROM_HERE,
            base::BindOnce(&ChannelAssociatedGroupController::AcceptSyncMessage,
                           this, id, message_id));
        return true;
      }

      // If |task_runner| has been torn down already, this PostTask will fail
      // and destroy |message|. That operation may in turn need to destroy
      // in-transit associated endpoints and thus acquire |lock_|. We no longer
      // need the lock to be held now, so we can release it before the PostTask.
      {
        // Grab interface name from |client| before releasing the lock to ensure
        // that |client| is safe to access.
        base::TaskAnnotator::ScopedSetIpcHash scoped_set_ipc_hash(
            client ? client->interface_name() : "unknown interface");
        locker.Release();
        task_runner->PostTask(
            FROM_HERE,
            base::BindOnce(
                &ChannelAssociatedGroupController::AcceptOnEndpointThread, this,
                std::move(*message)));
      }
      return true;
    }

    locker.Release();
    // It's safe to access |client| here without holding a lock, because this
    // code runs on a proxy thread and |client| can't be destroyed from any
    // thread.
    return client->HandleIncomingMessage(message);
  }

  void AcceptOnEndpointThread(mojo::Message message) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mojom"),
                 "ChannelAssociatedGroupController::AcceptOnEndpointThread");

    mojo::InterfaceId id = message.interface_id();
    DCHECK(mojo::IsValidInterfaceId(id) && !mojo::IsPrimaryInterfaceId(id));

    base::AutoLock locker(lock_);
    Endpoint* endpoint = FindEndpoint(id);
    if (!endpoint)
      return;

    mojo::InterfaceEndpointClient* client = endpoint->client();
    if (!client)
      return;

    if (!endpoint->task_runner()->RunsTasksInCurrentSequence() &&
        !proxy_task_runner_->RunsTasksInCurrentSequence()) {
      return;
    }

    // TODO(altimin): This event is temporarily kept as a debug fallback. Remove
    // it once the new implementation proves to be stable.
    TRACE_EVENT(
        TRACE_DISABLED_BY_DEFAULT("mojom"),
        // Using client->interface_name() is safe here because this is a static
        // string defined for each mojo interface.
        perfetto::StaticString(client->interface_name()),
        [&](perfetto::EventContext& ctx) {
          static const uint8_t* toplevel_flow_enabled =
              TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("toplevel.flow");
          if (!*toplevel_flow_enabled)
            return;

          perfetto::Flow::Global(message.GetTraceId())(ctx);
        });

    // Sync messages should never make their way to this method.
    DCHECK(!message.has_flag(mojo::Message::kFlagIsSync));

    bool result = false;
    {
      base::AutoUnlock unlocker(lock_);
      result = client->HandleIncomingMessage(&message);
    }

    if (!result)
      RaiseError();
  }

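  // Dispatches a sync message previously queued on |interface_id|'s endpoint
  // by Accept(). If the endpoint already consumed the message while blocked in
  // a sync wait, the pop below comes back empty and there is nothing to do.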
  void AcceptSyncMessage(mojo::InterfaceId interface_id, uint32_t message_id) {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("mojom"),
                 "ChannelAssociatedGroupController::AcceptSyncMessage");

    base::AutoLock locker(lock_);
    Endpoint* endpoint = FindEndpoint(interface_id);
    if (!endpoint)
      return;

    // Careful, if the endpoint is detached its members are cleared. Check for
    // that before dereferencing.
    mojo::InterfaceEndpointClient* client = endpoint->client();
    if (!client)
      return;

    if (!endpoint->task_runner()->RunsTasksInCurrentSequence() &&
        !proxy_task_runner_->RunsTasksInCurrentSequence()) {
      return;
    }

    // Using client->interface_name() is safe here because this is a static
    // string defined for each mojo interface.
    TRACE_EVENT0("mojom", client->interface_name());
    MessageWrapper message_wrapper = endpoint->PopSyncMessage(message_id);

    // The message must have already been dequeued by the endpoint waking up
    // from a sync wait. Nothing to do.
    if (message_wrapper.value().IsNull())
      return;

    bool result = false;
    {
      base::AutoUnlock unlocker(lock_);
      result = client->HandleIncomingMessage(&message_wrapper.value());
    }

    if (!result)
      RaiseError();
  }

  // mojo::PipeControlMessageHandlerDelegate:
  bool OnPeerAssociatedEndpointClosed(
      mojo::InterfaceId id,
      const absl::optional<mojo::DisconnectReason>& reason) override {
    DCHECK(thread_checker_.CalledOnValidThread());

    scoped_refptr<ChannelAssociatedGroupController> keepalive(this);
    base::AutoLock locker(lock_);
    scoped_refptr<Endpoint> endpoint = FindOrInsertEndpoint(id, nullptr);
    if (reason)
      endpoint->set_disconnect_reason(reason);
    if (!endpoint->peer_closed()) {
      if (endpoint->client())
        NotifyEndpointOfError(endpoint.get(), false /* force_async */);
      MarkPeerClosedAndMaybeRemove(endpoint.get());
    }

    return true;
  }

  bool WaitForFlushToComplete(
      mojo::ScopedMessagePipeHandle flush_pipe) override {
    // We don't support async flushing on the IPC Channel pipe.
    return false;
  }

  // Checked in places which must be run on the primary endpoint's thread.
  base::ThreadChecker thread_checker_;

  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  const scoped_refptr<base::SingleThreadTaskRunner> proxy_task_runner_;
  const scoped_refptr<mojo::internal::MessageQuotaChecker> quota_checker_;
  const bool set_interface_id_namespace_bit_;
  bool paused_ = false;
  std::unique_ptr<mojo::Connector> connector_;
  mojo::MessageDispatcher dispatcher_;
  mojo::PipeControlMessageHandler control_message_handler_;
  ControlMessageProxyThunk control_message_proxy_thunk_;

  // NOTE: It is unsafe to call into this object while holding |lock_|.
  mojo::PipeControlMessageProxy control_message_proxy_;

  // Guards access to |outgoing_messages_| only. Used to support memory dumps
  // which may be triggered from any thread.
  base::Lock outgoing_messages_lock_;

  // Outgoing messages that were sent before this controller was bound to a
  // real message pipe.
  std::vector<mojo::Message> outgoing_messages_;

  // Guards the fields below for thread-safe access.
  base::Lock lock_;

  bool encountered_error_ = false;
  bool shut_down_ = false;

  // ID #1 is reserved for the mojom::Channel interface.
  uint32_t next_interface_id_ = 2;

  std::map<uint32_t, scoped_refptr<Endpoint>> endpoints_;
};

bool ControllerMemoryDumpProvider::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  base::AutoLock lock(lock_);
  for (auto* controller : controllers_) {
    base::trace_event::MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(
        base::StringPrintf("mojo/queued_ipc_channel_message/0x%" PRIxPTR,
                           reinterpret_cast<uintptr_t>(controller)));
    dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameObjectCount,
                    base::trace_event::MemoryAllocatorDump::kUnitsObjects,
                    controller->GetQueuedMessageCount());
    MessageMemoryDumpInfo info;
    size_t count = 0;
    controller->GetTopQueuedMessageMemoryDumpInfo(&info, &count);
    dump->AddScalar("top_message_name", "id", info.id);
    dump->AddScalar("top_message_count",
                    base::trace_event::MemoryAllocatorDump::kUnitsObjects,
                    count);

    if (info.profiler_tag) {
      // TODO(ssid): Memory dumps currently do not support adding string
      // arguments in background dumps. So, add this value as a trace event for
      // now.
      TRACE_EVENT2(base::trace_event::MemoryDumpManager::kTraceCategory,
                   "ControllerMemoryDumpProvider::OnMemoryDump",
                   "top_queued_message_tag", info.profiler_tag,
                   "count", count);
    }
  }

  return true;
}

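// Owns the ChannelAssociatedGroupController for one IPC Channel and adapts it
// to the MojoBootstrap interface: Connect() binds the pipe and hands out the
// mojom::Channel sender/receiver endpoints, while Pause(), Unpause(), and
// Flush() forward to the controller. The controller is shut down when the
// bootstrap is destroyed.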
class MojoBootstrapImpl : public MojoBootstrap {
 public:
  MojoBootstrapImpl(
      mojo::ScopedMessagePipeHandle handle,
      const scoped_refptr<ChannelAssociatedGroupController> controller)
      : controller_(controller),
        associated_group_(controller),
        handle_(std::move(handle)) {}

  MojoBootstrapImpl(const MojoBootstrapImpl&) = delete;
  MojoBootstrapImpl& operator=(const MojoBootstrapImpl&) = delete;

  ~MojoBootstrapImpl() override {
    controller_->ShutDown();
  }

 private:
  void Connect(
      mojo::PendingAssociatedRemote<mojom::Channel>* sender,
      mojo::PendingAssociatedReceiver<mojom::Channel>* receiver) override {
    controller_->Bind(std::move(handle_), sender, receiver);
  }

  void StartReceiving() override { controller_->StartReceiving(); }

  void Pause() override {
    controller_->Pause();
  }

  void Unpause() override {
    controller_->Unpause();
  }

  void Flush() override {
    controller_->FlushOutgoingMessages();
  }

  mojo::AssociatedGroup* GetAssociatedGroup() override {
    return &associated_group_;
  }

  scoped_refptr<ChannelAssociatedGroupController> controller_;
  mojo::AssociatedGroup associated_group_;
  mojo::ScopedMessagePipeHandle handle_;
};

}  // namespace

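// The scoper below simply saves and sets the thread-local flag consulted by
// CanBindOffSequence() above. A minimal usage sketch (the receiver and pending
// receiver names here are hypothetical, not part of this file):
//
//   IPC::ScopedAllowOffSequenceChannelAssociatedBindings allow_binding;
//   receiver_.Bind(std::move(pending_receiver));
//   // The endpoint is recorded as bound off sequence, so its messages
//   // dispatch on this sequence rather than on the proxy task runner.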
ScopedAllowOffSequenceChannelAssociatedBindings::
    ScopedAllowOffSequenceChannelAssociatedBindings()
    : outer_flag_(GetOffSequenceBindingAllowedFlag().Get()) {
  GetOffSequenceBindingAllowedFlag().Set(true);
}

ScopedAllowOffSequenceChannelAssociatedBindings::
    ~ScopedAllowOffSequenceChannelAssociatedBindings() {
  GetOffSequenceBindingAllowedFlag().Set(outer_flag_);
}

// static
std::unique_ptr<MojoBootstrap> MojoBootstrap::Create(
    mojo::ScopedMessagePipeHandle handle,
    Channel::Mode mode,
    const scoped_refptr<base::SingleThreadTaskRunner>& ipc_task_runner,
    const scoped_refptr<base::SingleThreadTaskRunner>& proxy_task_runner,
    const scoped_refptr<mojo::internal::MessageQuotaChecker>& quota_checker) {
  return std::make_unique<MojoBootstrapImpl>(
      std::move(handle), base::MakeRefCounted<ChannelAssociatedGroupController>(
                             mode == Channel::MODE_SERVER, ipc_task_runner,
                             proxy_task_runner, quota_checker));
}

}  // namespace IPC