// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>  // For abort.

#include <memory>
#include <string>

#include "v8config.h"  // NOLINT(build/include_directory)

namespace v8 {

class Isolate;

// Valid priorities supported by the task scheduling infrastructure.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are highest priority tasks that block the execution
   * thread (e.g. major garbage collection). They must be finished as soon as
   * possible.
   */
  kUserBlocking,
};

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  virtual void Run() = 0;
};
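
// Example (illustrative sketch, not part of the original header): a minimal
// Task that wraps an arbitrary callable. std::function is used purely for
// demonstration; embedders can store whatever state Run() needs.
//
//   class FunctionTask : public Task {
//    public:
//     explicit FunctionTask(std::function<void()> fn) : fn_(std::move(fn)) {}
//     void Run() override { fn_(); }  // Invoked once by the scheduler.
//
//    private:
//     std::function<void()> fn_;
//   };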

/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  virtual void Run(double deadline_in_seconds) = 0;
};
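
// Example (illustrative sketch, not part of the original header): an IdleTask
// that performs small chunks of work until its deadline passes. HasWork(),
// DoSomeWork() and Now() are hypothetical helpers; Now() stands in for the
// platform's MonotonicallyIncreasingTime().
//
//   class ChunkedIdleTask : public IdleTask {
//    public:
//     void Run(double deadline_in_seconds) override {
//       // Stop early enough that the embedder regains control in time.
//       while (HasWork() && Now() < deadline_in_seconds) DoSomeWork();
//     }
//   };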

/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destructed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {}

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this
   * TaskRunner.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
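
// Example (illustrative sketch, not part of the original header): posting
// tasks to a TaskRunner. The runner is assumed to come from
// Platform::GetForegroundTaskRunner(), and FunctionTask is the sketch above.
//
//   std::shared_ptr<TaskRunner> runner =
//       platform->GetForegroundTaskRunner(isolate);
//   runner->PostTask(std::make_unique<FunctionTask>([] { /* work */ }));
//   // Runs roughly two seconds from now, subject to scheduling:
//   runner->PostDelayedTask(std::make_unique<FunctionTask>([] { /* ... */ }),
//                           2.0);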

/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if this thread should return from the worker task on the
   * current thread ASAP. Workers should periodically invoke ShouldYield (or
   * YieldIfNeeded()) as often as is reasonable.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   */
  virtual bool IsJoiningThread() const = 0;
};
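
// Example (illustrative sketch, not part of the original header): a worker
// body that uses GetTaskId() to give each concurrently running worker its own
// scratch slot. Because task ids are unique among workers running at the same
// time, indexing a per-worker array is race-free. The per_worker_scratch
// array and ProcessNextItem() are hypothetical.
//
//   void Run(JobDelegate* delegate) {
//     Scratch& scratch = per_worker_scratch[delegate->GetTaskId()];
//     while (!delegate->ShouldYield()) {
//       if (!ProcessNextItem(&scratch)) return;  // No work left.
//     }
//   }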

/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Forces all existing workers to yield ASAP but doesn't wait for them.
   * Warning: this is dangerous if the Job's callback is bound to or has access
   * to state which may be deleted after this call.
   */
  virtual void CancelAndDetach() = 0;

  /**
   * Returns true if there's any work pending or any worker running.
   */
  virtual bool IsActive() = 0;

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called. This may return true
   * even if no workers are running and IsCompleted() returns true.
   */
  virtual bool IsValid() = 0;

  /**
   * Returns true if job priority can be changed.
   */
  virtual bool UpdatePriorityEnabled() const { return false; }

  /**
   * Updates this Job's priority.
   */
  virtual void UpdatePriority(TaskPriority new_priority) {}
};

/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently, given
   * the number of threads currently assigned to this job and executing Run().
   * Run() is only invoked if the number of threads previously running Run()
   * was less than the value returned. Since GetMaxConcurrency() is a leaf
   * function, it must not call back any JobHandle methods.
   */
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
};
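
// Example (illustrative sketch, not part of the original header): a
// GetMaxConcurrency() that tracks outstanding work with an atomic counter, so
// workers stop being scheduled as the queue drains. remaining_items_ is a
// hypothetical std::atomic<size_t> member updated as items are added and
// taken.
//
//   size_t GetMaxConcurrency(size_t /* worker_count */) const override {
//     // Never request more threads than there are work items.
//     return remaining_items_.load(std::memory_order_relaxed);
//   }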

/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted, and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
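
// Example (illustrative sketch, not part of the original header): emitting a
// small JSON object as a trace argument. A real implementation must quote and
// escape any embedded strings itself.
//
//   class PointArg : public ConvertableToTraceFormat {
//    public:
//     PointArg(int x, int y) : x_(x), y_(y) {}
//     void AppendAsTraceFormat(std::string* out) const override {
//       out->append("{\"x\":" + std::to_string(x_) +
//                   ",\"y\":" + std::to_string(y_) + "}");
//     }
//
//    private:
//     int x_, y_;
//   };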

/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros, don't call this directly.
   * The name parameter is a category group for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   **/
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls
   * are usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   **/
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /** Adds tracing state change observer. */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /** Removes tracing state change observer. */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};

/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    // TODO(saelo): Remove this once all JIT pages are allocated through the
    // VirtualAddressSpace API.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back. This should be treated
   * as a hint to the OS that the pages are no longer needed. It does not
   * guarantee that the pages will be discarded immediately or at all.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The
   * address range stays reserved and can be accessed again later by changing
   * its permissions. However, in that case the memory content is guaranteed to
   * be zero-initialized again. The memory must have been previously allocated
   * by a call to AllocatePages. Returns true on success, false otherwise.
   */
  virtual bool DecommitPages(void* address, size_t size) = 0;

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the
    // destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserve pages at a fixed address returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need support this
   * and so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
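
// Example (illustrative sketch, not part of the original header): the common
// reserve-then-commit pattern. A region is reserved with kNoAccess and made
// usable on demand by raising its permissions; |allocator| is an
// embedder-provided PageAllocator.
//
//   size_t size = allocator->AllocatePageSize();
//   void* region =
//       allocator->AllocatePages(allocator->GetRandomMmapAddr(), size, size,
//                                PageAllocator::kNoAccess);
//   if (region != nullptr &&
//       allocator->SetPermissions(region, size, PageAllocator::kReadWrite)) {
//     // ... use the memory ...
//     allocator->FreePages(region, size);
//   }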

// Opaque type representing a handle to a shared memory region.
using PlatformSharedMemoryHandle = intptr_t;
static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;

// Conversion routines from the platform-dependent shared memory identifiers
// into the opaque PlatformSharedMemoryHandle type. These use the underlying
// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
// to avoid pulling in large OS header files into this header file. Instead,
// the users of these routines are expected to include the respective OS
// headers in addition to this one.
#if V8_OS_MACOS
// Convert between a shared memory handle and a mach_port_t referencing a
// memory entry object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
    unsigned int port) {
  return static_cast<PlatformSharedMemoryHandle>(port);
}
inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return static_cast<unsigned int>(handle);
}
#elif V8_OS_FUCHSIA
// Convert between a shared memory handle and a zx_handle_t to a VMO.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
  return static_cast<PlatformSharedMemoryHandle>(handle);
}
inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  return static_cast<uint32_t>(handle);
}
#elif V8_OS_WIN
// Convert between a shared memory handle and a Windows HANDLE to a file
// mapping object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
    void* handle) {
  return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
}
inline void* FileMappingFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return reinterpret_cast<void*>(handle);
}
#else
// Convert between a shared memory handle and a file descriptor.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
  return static_cast<PlatformSharedMemoryHandle>(fd);
}
inline int FileDescriptorFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return static_cast<int>(handle);
}
#endif
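
// Example (illustrative sketch, not part of the original header): on POSIX
// systems the handle wraps a plain file descriptor, e.g. one from shm_open()
// (the region name here is hypothetical).
//
//   int fd = shm_open("/my_region", O_RDWR | O_CREAT, 0600);
//   PlatformSharedMemoryHandle handle =
//       SharedMemoryHandleFromFileDescriptor(fd);
//   // ... later, to act on the underlying object again:
//   int same_fd = FileDescriptorFromSharedMemoryHandle(handle);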

/**
 * Possible permissions for memory pages.
 */
enum class PagePermissions {
  kNoAccess,
  kRead,
  kReadWrite,
  kReadWriteExecute,
  kReadExecute,
};

/**
 * Class to manage a virtual memory address space.
 *
 * This class represents a contiguous region of virtual address space in which
 * sub-spaces and (private or shared) memory pages can be allocated, freed, and
 * modified. This interface is meant to eventually replace the PageAllocator
 * interface, and can be used as an alternative in the meantime.
 *
 * This API is not yet stable and may change without notice!
 */
class VirtualAddressSpace {
 public:
  using Address = uintptr_t;

  VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
                      Address base, size_t size,
                      PagePermissions max_page_permissions)
      : page_size_(page_size),
        allocation_granularity_(allocation_granularity),
        base_(base),
        size_(size),
        max_page_permissions_(max_page_permissions) {}

  virtual ~VirtualAddressSpace() = default;

  /**
   * The page size used inside this space. Guaranteed to be a power of two.
   * Used as granularity for all page-related operations except for allocation,
   * which uses the allocation_granularity(), see below.
   *
   * \returns the page size in bytes.
   */
  size_t page_size() const { return page_size_; }

  /**
   * The granularity of page allocations and, by extension, of subspace
   * allocations. This is guaranteed to be a power of two and a multiple of the
   * page_size(). In practice, this is equal to the page size on most OSes, but
   * on Windows it is usually 64KB, while the page size is 4KB.
   *
   * \returns the allocation granularity in bytes.
   */
  size_t allocation_granularity() const { return allocation_granularity_; }

  /**
   * The base address of the address space managed by this instance.
   *
   * \returns the base address of this address space.
   */
  Address base() const { return base_; }

  /**
   * The size of the address space managed by this instance.
   *
   * \returns the size of this address space in bytes.
   */
  size_t size() const { return size_; }

  /**
   * The maximum page permissions that pages allocated inside this space can
   * obtain.
   *
   * \returns the maximum page permissions.
   */
  PagePermissions max_page_permissions() const {
    return max_page_permissions_;
  }

  /**
   * Sets the random seed so that RandomPageAddress() will generate
   * repeatable sequences of random addresses.
   *
   * \param seed The seed for the PRNG.
   */
  virtual void SetRandomSeed(int64_t seed) = 0;

  /**
   * Returns a random address inside this address space, suitable for page
   * allocation hints.
   *
   * \returns a random address aligned to allocation_granularity().
   */
  virtual Address RandomPageAddress() = 0;

  /**
   * Allocates private memory pages with the given alignment and permissions.
   *
   * \param hint If nonzero, the allocation is attempted to be placed at the
   * given address first. If that fails, the allocation is attempted to be
   * placed elsewhere, possibly nearby, but that is not guaranteed. Specifying
   * zero for the hint always causes this function to choose a random address.
   * The hint, if specified, must be aligned to the specified alignment.
   *
   * \param size The size of the allocation in bytes. Must be a multiple of the
   * allocation_granularity().
   *
   * \param alignment The alignment of the allocation in bytes. Must be a
   * multiple of the allocation_granularity() and should be a power of two.
   *
   * \param permissions The page permissions of the newly allocated pages.
   *
   * \returns the start address of the allocated pages on success, zero on
   * failure.
   */
  static constexpr Address kNoHint = 0;
  virtual V8_WARN_UNUSED_RESULT Address
  AllocatePages(Address hint, size_t size, size_t alignment,
                PagePermissions permissions) = 0;

  /**
   * Frees previously allocated pages.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the pages to free. This address must
   * have been obtained through a call to AllocatePages.
   *
   * \param size The size in bytes of the region to free. This must match the
   * size passed to AllocatePages when the pages were allocated.
   */
  virtual void FreePages(Address address, size_t size) = 0;

  /**
   * Sets permissions of all allocated pages in the given range.
   *
   * \param address The start address of the range. Must be aligned to
   * page_size().
   *
   * \param size The size in bytes of the range. Must be a multiple
   * of page_size().
   *
   * \param permissions The new permissions for the range.
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
      Address address, size_t size, PagePermissions permissions) = 0;

  /**
   * Creates a guard region at the specified address.
   *
   * Guard regions are guaranteed to cause a fault when accessed and generally
   * do not count towards any memory consumption limits. Further, allocating
   * guard regions can usually not fail in subspaces if the region does not
   * overlap with another region, subspace, or page allocation.
   *
   * \param address The start address of the guard region. Must be aligned to
   * the allocation_granularity().
   *
   * \param size The size of the guard region in bytes. Must be a multiple of
   * the allocation_granularity().
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address,
                                                         size_t size) = 0;

  /**
   * Frees an existing guard region.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the guard region to free. This address
   * must have previously been used as address parameter in a successful
   * invocation of AllocateGuardRegion.
   *
   * \param size The size in bytes of the guard region to free. This must match
   * the size passed to AllocateGuardRegion when the region was created.
   */
  virtual void FreeGuardRegion(Address address, size_t size) = 0;

  /**
   * Allocates shared memory pages with the given permissions.
   *
   * \param hint Placement hint. See AllocatePages.
   *
   * \param size The size of the allocation in bytes. Must be a multiple of the
   * allocation_granularity().
   *
   * \param permissions The page permissions of the newly allocated pages.
   *
   * \param handle A platform-specific handle to a shared memory object. See
   * the SharedMemoryHandleFromX routines above for ways to obtain these.
   *
   * \param offset The offset in the shared memory object at which the mapping
   * should start. Must be a multiple of the allocation_granularity().
   *
   * \returns the start address of the allocated pages on success, zero on
   * failure.
   */
  virtual V8_WARN_UNUSED_RESULT Address
  AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
                      PlatformSharedMemoryHandle handle, uint64_t offset) = 0;

  /**
   * Frees previously allocated shared pages.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the pages to free. This address must
   * have been obtained through a call to AllocateSharedPages.
   *
   * \param size The size in bytes of the region to free. This must match the
   * size passed to AllocateSharedPages when the pages were allocated.
   */
  virtual void FreeSharedPages(Address address, size_t size) = 0;

  /**
   * Whether this instance can allocate subspaces or not.
   *
   * \returns true if subspaces can be allocated, false if not.
   */
  virtual bool CanAllocateSubspaces() = 0;

  /**
   * Allocates a subspace.
   *
   * The address space of a subspace stays reserved in the parent space for the
   * lifetime of the subspace. As such, it is guaranteed that page allocations
   * on the parent space cannot end up inside a subspace.
   *
   * \param hint Hints where the subspace should be allocated. See
   * AllocatePages() for more details.
   *
   * \param size The size in bytes of the subspace. Must be a multiple of the
   * allocation_granularity().
   *
   * \param alignment The alignment of the subspace in bytes. Must be a
   * multiple of the allocation_granularity() and should be a power of two.
   *
   * \param max_page_permissions The maximum permissions that pages allocated
   * in the subspace can obtain.
   *
   * \returns a new subspace or nullptr on failure.
   */
  virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
      Address hint, size_t size, size_t alignment,
      PagePermissions max_page_permissions) = 0;

  //
  // TODO(v8) maybe refactor the methods below before stabilizing the API. For
  // example by combining them into some form of page operation method that
  // takes a command enum as parameter.
  //

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be aligned to the page_size(). The next write to this memory
   * area brings the memory transparently back. This should be treated as a
   * hint to the OS that the pages are no longer needed. It does not guarantee
   * that the pages will be discarded immediately or at all.
   *
   * \returns true on success, false otherwise. Since this method is only a
   * hint, a successful invocation does not imply that pages have been removed.
   */
  virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
                                                        size_t size) {
    return true;
  }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The
   * address range stays reserved and can be accessed again later by changing
   * its permissions. However, in that case the memory content is guaranteed to
   * be zero-initialized again. The memory must have been previously allocated
   * by a call to AllocatePages.
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
                                                   size_t size) = 0;

 private:
  const size_t page_size_;
  const size_t allocation_granularity_;
  const Address base_;
  const size_t size_;
  const PagePermissions max_page_permissions_;
};
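
// Example (illustrative sketch, not part of the original header): allocating
// and releasing pages in a VirtualAddressSpace. |space| is assumed to be a
// valid implementation of the interface above.
//
//   size_t size = space->allocation_granularity();
//   VirtualAddressSpace::Address page = space->AllocatePages(
//       VirtualAddressSpace::kNoHint, size, size, PagePermissions::kNoAccess);
//   if (page != 0 &&
//       space->SetPagePermissions(page, size, PagePermissions::kReadWrite)) {
//     // ... use the memory, then return it to the space:
//     space->FreePages(page, size);
//   }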

/**
 * V8 Allocator used for allocating zone backings.
 */
class ZoneBackingAllocator {
 public:
  using MallocFn = void* (*)(size_t);
  using FreeFn = void (*)(void*);

  virtual MallocFn GetMallocFn() const { return ::malloc; }
  virtual FreeFn GetFreeFn() const { return ::free; }
};
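
// Example (illustrative sketch, not part of the original header): routing zone
// backings through custom functions. MyMalloc and MyFree are hypothetical
// embedder functions with malloc/free-compatible signatures.
//
//   class TrackingZoneAllocator : public ZoneBackingAllocator {
//    public:
//     MallocFn GetMallocFn() const override { return &MyMalloc; }
//     FreeFn GetFreeFn() const override { return &MyFree; }
//   };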

/**
 * Observer used by V8 to notify the embedder about entering/leaving sections
 * with high throughput of malloc/free operations.
 */
class HighAllocationThroughputObserver {
 public:
  virtual void EnterSection() {}
  virtual void LeaveSection() {}
};

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   */
  virtual PageAllocator* GetPageAllocator() {
    // TODO(bbudge) Make this abstract after all embedders implement this.
    return nullptr;
  }

  /**
   * Allows the embedder to specify a custom allocator used for zones.
   */
  virtual ZoneBackingAllocator* GetZoneBackingAllocator() {
    static ZoneBackingAllocator default_allocator;
    return &default_allocator;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {
    // TODO(bbudge) Remove this when embedders override the following method.
    // See crbug.com/634547.
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * memory regions. The |length| parameter is the amount of memory needed.
   * Returns true if memory is now available. Returns false if no memory could
   * be made available. V8 will retry allocations until this method returns
   * false.
   *
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

  /**
   * Gets the number of worker threads used by
   * Call(BlockingTask)OnWorkerThread(). This can be used to estimate the
   * number of tasks a work package should be split into. A return value of 0
   * means that there are no worker threads available. Note that a value of 0
   * won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground.
   * The TaskRunner's NonNestableTasksEnabled() must be true. This function
   * should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high-priority on a worker thread.
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low-priority on a worker thread.
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) = 0;

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority
   *   requests and/or ability to request lower priority work to yield when
   *   high priority work comes in.
   * A canonical implementation of |job_task| looks like:
   *   class MyJobTask : public JobTask {
   *    public:
   *     MyJobTask(...) : worker_queue_(...) {}
   *     // JobTask:
   *     void Run(JobDelegate* delegate) override {
   *       while (!delegate->ShouldYield()) {
   *         // Smallest unit of work.
   *         auto work_item = worker_queue_.TakeWorkItem();  // Thread safe.
   *         if (!work_item) return;
   *         ProcessWork(work_item);
   *       }
   *     }
   *
   *     size_t GetMaxConcurrency(size_t worker_count) const override {
   *       return worker_queue_.GetSize();  // Thread safe.
   *     }
   *   };
   *   auto handle = PostJob(TaskPriority::kUserVisible,
   *                         std::make_unique<MyJobTask>(...));
   *   handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate, must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock
   * (B) if that lock is *never* held while calling back into JobHandle from
   * any thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   *
   * A sufficient PostJob() implementation that uses the default Job provided
   * in libplatform looks like:
   *   std::unique_ptr<JobHandle> PostJob(
   *       TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
   *     return v8::platform::NewDefaultJobHandle(
   *         this, priority, std::move(job_task), NumberOfWorkerThreads());
   *   }
   */
  virtual std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) = 0;

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   **/
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch.
   * This function is expected to return at least millisecond-precision values.
   */
  virtual double CurrentClockTimeMillis() = 0;

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Disables printing of the stack trace if nullptr.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

  /**
   * Allows the embedder to observe sections with high throughput allocation
   * operations.
   */
  virtual HighAllocationThroughputObserver*
  GetHighAllocationThroughputObserver() {
    static HighAllocationThroughputObserver default_observer;
    return &default_observer;
  }

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();
};
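
// Example (illustrative sketch, not part of the original header): most
// embedders do not implement Platform from scratch but reuse the default
// implementation shipped with V8 in libplatform:
//
//   #include "libplatform/libplatform.h"
//
//   std::unique_ptr<v8::Platform> platform =
//       v8::platform::NewDefaultPlatform();
//   v8::V8::InitializePlatform(platform.get());
//   v8::V8::Initialize();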

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_