gpu_timing.cc

// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/gpu_timing.h"

#include <utility>

#include "base/containers/circular_deque.h"
#include "base/memory/ptr_util.h"
#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_version_info.h"

namespace gl {

class TimeElapsedTimerQuery;
class TimerQuery;

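// Converts a value reported by GL in nanoseconds to microseconds, rounding to
// the nearest microsecond.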
int64_t NanoToMicro(uint64_t nano_seconds) {
  const uint64_t up =
      nano_seconds + base::Time::kNanosecondsPerMicrosecond / 2;
  return static_cast<int64_t>(up / base::Time::kNanosecondsPerMicrosecond);
}
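
// Asks GL how many bits GL_TIMESTAMP queries carry. Some drivers report 0,
// which means timestamp queries are unusable and must be emulated with
// elapsed-time queries (see DoTimeStampQuery()).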
int32_t QueryTimestampBits() {
  GLint timestamp_bits = 0;
  glGetQueryiv(GL_TIMESTAMP, GL_QUERY_COUNTER_BITS, &timestamp_bits);
  return static_cast<int32_t>(timestamp_bits);
}

class GPUTimingImpl : public GPUTiming {
 public:
  explicit GPUTimingImpl(GLContextReal* context);

  GPUTimingImpl(const GPUTimingImpl&) = delete;
  GPUTimingImpl& operator=(const GPUTimingImpl&) = delete;

  ~GPUTimingImpl() override;

  void ForceTimeElapsedQuery() { force_time_elapsed_query_ = true; }
  bool IsForceTimeElapsedQuery() { return force_time_elapsed_query_; }

  GPUTiming::TimerType GetTimerType() const { return timer_type_; }

  uint32_t GetDisjointCount();
  int64_t CalculateTimerOffset();

  scoped_refptr<QueryResult> BeginElapsedTimeQuery();
  void EndElapsedTimeQuery(scoped_refptr<QueryResult> result);

  scoped_refptr<QueryResult> DoTimeStampQuery();

  int64_t GetCurrentCPUTime() {
    return cpu_time_for_testing_.is_null()
               ? (base::TimeTicks::Now() - base::TimeTicks()).InMicroseconds()
               : cpu_time_for_testing_.Run();
  }
  void SetCpuTimeForTesting(base::RepeatingCallback<int64_t(void)> cpu_time) {
    cpu_time_for_testing_ = std::move(cpu_time);
  }

  void UpdateQueryResults();

  int64_t GetMaxTimeStamp() { return max_time_stamp_; }
  void UpdateMaxTimeStamp(int64_t value) {
    max_time_stamp_ = std::max(max_time_stamp_, value);
  }

  uint32_t GetElapsedQueryCount() { return elapsed_query_count_; }
  void IncElapsedQueryCount() { elapsed_query_count_++; }
  void DecElapsedQueryCount() { elapsed_query_count_--; }

  void SetLastElapsedQuery(scoped_refptr<TimeElapsedTimerQuery> query);
  scoped_refptr<TimeElapsedTimerQuery> GetLastElapsedQuery();

  void HandleBadQuery();
  bool IsGoodQueryID(uint32_t query_id);

 private:
  scoped_refptr<GPUTimingClient> CreateGPUTimingClient() override;

  base::RepeatingCallback<int64_t(void)> cpu_time_for_testing_;
  GPUTiming::TimerType timer_type_ = GPUTiming::kTimerTypeInvalid;
  uint32_t disjoint_counter_ = 0;
  int64_t offset_ = 0;  // offset cache when timer_type_ == kTimerTypeARB
  bool offset_valid_ = false;
  bool force_time_elapsed_query_ = false;
  int32_t timestamp_bit_count_gl_ = -1;  // gl implementation timestamp bits

  uint32_t next_timer_query_id_ = 0;
  uint32_t next_good_timer_query_id_ = 0;  // identify bad ids for disjoints.
  uint32_t query_disjoint_count_ = 0;

  // Extra state tracking data for elapsed timer queries.
  int64_t max_time_stamp_ = 0;
  uint32_t elapsed_query_count_ = 0;
  scoped_refptr<TimeElapsedTimerQuery> last_elapsed_query_;

  base::circular_deque<scoped_refptr<TimerQuery>> queries_;
};

class QueryResult : public base::RefCounted<QueryResult> {
 public:
  QueryResult() {}

  QueryResult(const QueryResult&) = delete;
  QueryResult& operator=(const QueryResult&) = delete;

  bool IsAvailable() const { return available_; }
  int64_t GetDelta() const { return end_value_ - start_value_; }
  int64_t GetStartValue() const { return start_value_; }
  int64_t GetEndValue() const { return end_value_; }

  void SetStartValue(int64_t value) { start_value_ = value; }
  void SetEndValue(int64_t value) {
    available_ = true;
    end_value_ = value;
  }

 private:
  friend class base::RefCounted<QueryResult>;
  ~QueryResult() {}

  bool available_ = false;
  int64_t start_value_ = 0;
  int64_t end_value_ = 0;
};

class TimerQuery : public base::RefCounted<TimerQuery> {
 public:
  explicit TimerQuery(uint32_t next_id);

  TimerQuery(const TimerQuery&) = delete;
  TimerQuery& operator=(const TimerQuery&) = delete;

  virtual void Destroy() = 0;

  // Returns true when UpdateQueryResults() is ready to be called.
  virtual bool IsAvailable(GPUTimingImpl* gpu_timing) = 0;

  // Fills out query result start and end, called after IsAvailable() is true.
  virtual void UpdateQueryResults(GPUTimingImpl* gpu_timing) = 0;

  // Called when Query is next in line, used to transition states.
  virtual void PrepareNextUpdate(scoped_refptr<TimerQuery> prev) {}

  uint32_t timer_query_id_ = 0;
  int64_t time_stamp_ = 0;  // Timestamp of the query, could be estimated.

 protected:
  friend class base::RefCounted<TimerQuery>;
  virtual ~TimerQuery();
};

TimerQuery::TimerQuery(uint32_t next_id) : timer_query_id_(next_id) {}

TimerQuery::~TimerQuery() {}
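
// A timer query implemented with GL_TIME_ELAPSED queries. Because GL only
// allows one GL_TIME_ELAPSED query to be active at a time, overlapping
// requests are handled by ending the currently active query and immediately
// beginning a new one; per-request start/end times are then reconstructed by
// accumulating the elapsed intervals in UpdateQueryResults().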
class TimeElapsedTimerQuery : public TimerQuery {
 public:
  TimeElapsedTimerQuery(GPUTimingImpl* gpu_timing, uint32_t next_id)
      : TimerQuery(next_id) {
    glGenQueries(1, &gl_query_id_);
  }

  void Destroy() override { glDeleteQueries(1, &gl_query_id_); }

  scoped_refptr<QueryResult> StartQuery(GPUTimingImpl* gpu_timing) {
    DCHECK(query_result_start_.get() == nullptr);
    query_begin_cpu_time_ = gpu_timing->GetCurrentCPUTime();
    if (gpu_timing->GetElapsedQueryCount() == 0) {
      first_top_level_query_ = true;
    } else {
      // Stop the current timer query.
      glEndQuery(GL_TIME_ELAPSED);
    }

    // Begin a new time elapsed query.
    glBeginQuery(GL_TIME_ELAPSED, gl_query_id_);
    query_result_start_ = new QueryResult();

    // Update GPUTiming state.
    gpu_timing->SetLastElapsedQuery(this);
    gpu_timing->IncElapsedQueryCount();

    return query_result_start_;
  }

  void EndQuery(GPUTimingImpl* gpu_timing, scoped_refptr<QueryResult> result) {
    DCHECK(gpu_timing->GetElapsedQueryCount() != 0);

    scoped_refptr<TimeElapsedTimerQuery> last_query =
        gpu_timing->GetLastElapsedQuery();
    DCHECK(last_query.get());
    DCHECK(last_query->query_result_end_.get() == nullptr);

    last_query->query_result_end_ = result;
    gpu_timing->DecElapsedQueryCount();

    if (gpu_timing->GetElapsedQueryCount() != 0) {
      // Continue the timer if there are still ongoing queries.
      glEndQuery(GL_TIME_ELAPSED);
      glBeginQuery(GL_TIME_ELAPSED, gl_query_id_);
      gpu_timing->SetLastElapsedQuery(this);
    } else {
      // Simply end the query and reset the current offset.
      glEndQuery(GL_TIME_ELAPSED);
      gpu_timing->SetLastElapsedQuery(nullptr);
    }
  }

  // Returns true when UpdateQueryResults() is ready to be called.
  bool IsAvailable(GPUTimingImpl* gpu_timing) override {
    if (gpu_timing->GetElapsedQueryCount() != 0 &&
        gpu_timing->GetLastElapsedQuery() == this) {
      // Cannot check whether the result is available if EndQuery has not been
      // called. Since only one query is active at a time, the end query is
      // only missing for the very last query while the ongoing query counter
      // is not 0.
      return false;
    }

    GLuint done = 0;
    glGetQueryObjectuiv(gl_query_id_, GL_QUERY_RESULT_AVAILABLE, &done);
    return !!done;
  }

  // Fills out query result start and end, called after IsAvailable() is true.
  void UpdateQueryResults(GPUTimingImpl* gpu_timing) override {
    GLuint64 result_value = 0;
    glGetQueryObjectui64v(gl_query_id_, GL_QUERY_RESULT, &result_value);
    const int64_t micro_results = NanoToMicro(result_value);

    // Adjust the previous query's end time if it is before the current max.
    const int64_t start_time =
        std::max(first_top_level_query_ ? query_begin_cpu_time_ : 0,
                 std::max(prev_query_end_time_,
                          gpu_timing->GetMaxTimeStamp()));

    // As a sanity check, if the result value is greater than the time
    // allotted we can safely say this is garbage data.
    const int64_t max_possible_time =
        gpu_timing->GetCurrentCPUTime() - query_begin_cpu_time_;
    if (micro_results > max_possible_time) {
      gpu_timing->HandleBadQuery();
    }

    // Elapsed queries need to be adjusted so they are relative to one another.
    // Absolute timer queries are already relative to one another absolutely.
    time_stamp_ = start_time + micro_results;

    if (query_result_start_.get()) {
      query_result_start_->SetStartValue(start_time);
    }
    if (query_result_end_.get()) {
      query_result_end_->SetEndValue(time_stamp_);
    }
  }

  // Called when Query is next in line, used to transition states.
  void PrepareNextUpdate(scoped_refptr<TimerQuery> prev) override {
    prev_query_end_time_ = prev->time_stamp_;
  }

 private:
  ~TimeElapsedTimerQuery() override {}

  bool first_top_level_query_ = false;
  uint32_t gl_query_id_ = 0;
  int64_t prev_query_end_time_ = 0;
  int64_t query_begin_cpu_time_ = 0;
  scoped_refptr<QueryResult> query_result_start_;
  scoped_refptr<QueryResult> query_result_end_;
};
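
// A timer query implemented with glQueryCounter(GL_TIMESTAMP). The raw GPU
// timestamp is converted to microseconds and shifted into the CPU time domain
// using the offset from GPUTimingImpl::CalculateTimerOffset().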
class TimeStampTimerQuery : public TimerQuery {
 public:
  explicit TimeStampTimerQuery(uint32_t next_id) : TimerQuery(next_id) {
    glGenQueries(1, &gl_query_id_);
  }

  void Destroy() override { glDeleteQueries(1, &gl_query_id_); }

  scoped_refptr<QueryResult> DoQuery() {
    glQueryCounter(gl_query_id_, GL_TIMESTAMP);
    query_result_ = new QueryResult();
    return query_result_;
  }

  // Returns true when UpdateQueryResults() is ready to be called.
  bool IsAvailable(GPUTimingImpl* gpu_timing) override {
    GLuint done = 0;
    glGetQueryObjectuiv(gl_query_id_, GL_QUERY_RESULT_AVAILABLE, &done);
    return !!done;
  }

  // Fills out query result start and end, called after IsAvailable() is true.
  void UpdateQueryResults(GPUTimingImpl* gpu_timing) override {
    DCHECK(IsAvailable(gpu_timing));

    GLuint64 result_value = 0;
    glGetQueryObjectui64v(gl_query_id_, GL_QUERY_RESULT, &result_value);
    const int64_t micro_results = NanoToMicro(result_value);

    const int64_t offset = gpu_timing->CalculateTimerOffset();
    const int64_t adjusted_result = micro_results + offset;
    DCHECK(query_result_.get());
    query_result_->SetStartValue(adjusted_result);
    query_result_->SetEndValue(adjusted_result);
    time_stamp_ = adjusted_result;
  }

 private:
  ~TimeStampTimerQuery() override {}

  uint32_t gl_query_id_ = 0;
  scoped_refptr<QueryResult> query_result_;
};

GPUTimingImpl::GPUTimingImpl(GLContextReal* context) {
  DCHECK(context);
  const GLVersionInfo* version_info = context->GetVersionInfo();
  DCHECK(version_info);
  if (context->HasExtension("GL_EXT_disjoint_timer_query")) {
    timer_type_ = GPUTiming::kTimerTypeDisjoint;
  } else if (context->HasExtension("GL_ARB_timer_query")) {
    timer_type_ = GPUTiming::kTimerTypeARB;
  } else if (context->HasExtension("GL_EXT_timer_query")) {
    timer_type_ = GPUTiming::kTimerTypeEXT;
    force_time_elapsed_query_ = true;
    timestamp_bit_count_gl_ = 0;
  }

  // The command glGetInteger64v is only supported under ES3 and GL3.2. Since
  // it is only used for timestamps, we work around this by emulating
  // timestamps so WebGL 1.0 will still have access to the extension.
  if (!version_info->IsAtLeastGLES(3, 0) && !version_info->IsAtLeastGL(3, 2)) {
    force_time_elapsed_query_ = true;
    timestamp_bit_count_gl_ = 0;
  }
}

GPUTimingImpl::~GPUTimingImpl() {}

uint32_t GPUTimingImpl::GetDisjointCount() {
  if (timer_type_ == GPUTiming::kTimerTypeDisjoint) {
    GLint disjoint_value = 0;
    glGetIntegerv(GL_GPU_DISJOINT_EXT, &disjoint_value);
    if (disjoint_value) {
      offset_valid_ = false;
      disjoint_counter_++;
    }
  }
  return disjoint_counter_;
}
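
// Computes the offset, in microseconds, that maps GPU timestamps onto the CPU
// clock (current CPU time minus the current GL_TIMESTAMP value). For ARB
// timers the offset is cached once computed; for disjoint timers it is
// recomputed on each call, since a disjoint event invalidates it.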
int64_t GPUTimingImpl::CalculateTimerOffset() {
  if (!offset_valid_) {
    if (timer_type_ == GPUTiming::kTimerTypeDisjoint ||
        timer_type_ == GPUTiming::kTimerTypeARB) {
      GLint64 gl_now = 0;
      glGetInteger64v(GL_TIMESTAMP, &gl_now);
      const int64_t cpu_time = GetCurrentCPUTime();
      const int64_t micro_offset = cpu_time - NanoToMicro(gl_now);

      // We cannot expect these instructions to run with accuracy within
      // 1 microsecond; instead, discard differences which are less than a
      // single millisecond.
      base::TimeDelta delta = base::Microseconds(micro_offset - offset_);
      if (delta.magnitude().InMilliseconds() >= 1) {
        offset_ = micro_offset;
        offset_valid_ = (timer_type_ == GPUTiming::kTimerTypeARB);
      }
    } else {
      offset_ = 0;
      offset_valid_ = true;
    }
  }
  return offset_;
}

scoped_refptr<QueryResult> GPUTimingImpl::BeginElapsedTimeQuery() {
  DCHECK(timer_type_ != GPUTiming::kTimerTypeInvalid);
  queries_.push_back(new TimeElapsedTimerQuery(this, next_timer_query_id_++));
  return static_cast<TimeElapsedTimerQuery*>(
      queries_.back().get())->StartQuery(this);
}

void GPUTimingImpl::EndElapsedTimeQuery(scoped_refptr<QueryResult> result) {
  DCHECK(timer_type_ != GPUTiming::kTimerTypeInvalid);
  DCHECK(result.get());
  if (GetElapsedQueryCount() > 1) {
    // Create a new elapsed timer query if there are still ongoing queries.
    queries_.push_back(
        new TimeElapsedTimerQuery(this, next_timer_query_id_++));
    static_cast<TimeElapsedTimerQuery*>(
        queries_.back().get())->EndQuery(this, result);
  } else {
    // Simply end the query and reset the current offset.
    DCHECK(GetLastElapsedQuery().get());
    GetLastElapsedQuery()->EndQuery(this, result);
    DCHECK(GetLastElapsedQuery().get() == nullptr);
  }
}

scoped_refptr<QueryResult> GPUTimingImpl::DoTimeStampQuery() {
  DCHECK(timer_type_ != GPUTiming::kTimerTypeInvalid);

  // Certain GL drivers have timestamp bit count set to 0 which means
  // timestamps aren't supported. Emulate them with time elapsed queries if
  // that is the case.
  if (timestamp_bit_count_gl_ == -1) {
    DCHECK(timer_type_ != GPUTiming::kTimerTypeEXT);
    timestamp_bit_count_gl_ = QueryTimestampBits();
    force_time_elapsed_query_ |= (timestamp_bit_count_gl_ == 0);
  }

  if (force_time_elapsed_query_) {
    // Replace with elapsed timer queries instead.
    scoped_refptr<QueryResult> result = BeginElapsedTimeQuery();
    EndElapsedTimeQuery(result);
    return result;
  }

  queries_.push_back(new TimeStampTimerQuery(next_timer_query_id_++));
  return static_cast<TimeStampTimerQuery*>(queries_.back().get())->DoQuery();
}
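
// Processes pending queries in FIFO order, stopping at the first query whose
// GL result is not yet available.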
void GPUTimingImpl::UpdateQueryResults() {
  // Query availability of and count the queries that are available.
  int available_queries = 0;
  for (const scoped_refptr<TimerQuery>& query : queries_) {
    if (!query->IsAvailable(this))
      break;
    available_queries++;
  }

  // Check for disjoints; this must be done after we checked for availability.
  const uint32_t disjoint_counter = GetDisjointCount();
  if (disjoint_counter != query_disjoint_count_) {
    next_good_timer_query_id_ = next_timer_query_id_;
    query_disjoint_count_ = disjoint_counter;
  }

  // Fill in the query result data once we know the disjoint value is updated.
  // Note that even if a disjoint happened and the values may or may not be
  // garbage, we still fill them in and let GPUTimingClients detect and
  // discard bad query data. The only thing we need to account for here is to
  // not use garbage timer data to fill states such as max query times.
  for (int i = 0; i < available_queries; ++i) {
    scoped_refptr<TimerQuery> query = queries_.front();
    query->UpdateQueryResults(this);
    DCHECK(query->time_stamp_) << "Query Timestamp was not updated.";

    // For good queries, keep track of the max valid time stamps.
    if (IsGoodQueryID(query->timer_query_id_))
      UpdateMaxTimeStamp(query->time_stamp_);

    query->Destroy();
    queries_.pop_front();
    if (!queries_.empty())
      queries_.front()->PrepareNextUpdate(query);
  }
}

void GPUTimingImpl::SetLastElapsedQuery(
    scoped_refptr<TimeElapsedTimerQuery> query) {
  last_elapsed_query_ = query;
}

scoped_refptr<TimeElapsedTimerQuery> GPUTimingImpl::GetLastElapsedQuery() {
  return last_elapsed_query_;
}

void GPUTimingImpl::HandleBadQuery() {
  // Mark all queries as bad and signal an artificial disjoint value.
  next_good_timer_query_id_ = next_timer_query_id_;
  offset_valid_ = false;
  query_disjoint_count_ = ++disjoint_counter_;
}

bool GPUTimingImpl::IsGoodQueryID(uint32_t query_id) {
  return query_id >= next_good_timer_query_id_;
}

scoped_refptr<GPUTimingClient> GPUTimingImpl::CreateGPUTimingClient() {
  return new GPUTimingClient(this);
}

GPUTiming* GPUTiming::CreateGPUTiming(GLContextReal* context) {
  return new GPUTimingImpl(context);
}

GPUTiming::GPUTiming() {}

GPUTiming::~GPUTiming() {}

GPUTimer::~GPUTimer() {}
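
// Typical GPUTimer usage, as a sketch; obtaining the GPUTimingClient from a
// GL context happens outside this file and is shown here only as an
// assumption:
//
//   scoped_refptr<GPUTimingClient> client = context->CreateGPUTimingClient();
//   std::unique_ptr<GPUTimer> timer = client->CreateGPUTimer(true);
//   timer->Start();
//   // ... issue the GL commands to be timed ...
//   timer->End();
//   // Later, once the GPU work has completed:
//   if (timer->IsAvailable()) {
//     int64_t start = 0, end = 0;
//     timer->GetStartEndTimestamps(&start, &end);
//   }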
void GPUTimer::Destroy(bool have_context) {
  if (have_context) {
    if (timer_state_ == kTimerState_WaitingForEnd) {
      DCHECK(gpu_timing_client_->gpu_timing_);
      DCHECK(elapsed_timer_result_.get());
      gpu_timing_client_->gpu_timing_->EndElapsedTimeQuery(
          elapsed_timer_result_);
    }
  }
}

void GPUTimer::Reset() {
  // We can reset from any state other than when a Start() is waiting for
  // End().
  DCHECK(timer_state_ != kTimerState_WaitingForEnd);
  time_stamp_result_ = nullptr;
  elapsed_timer_result_ = nullptr;
  timer_state_ = kTimerState_Ready;
}

void GPUTimer::QueryTimeStamp() {
  DCHECK(gpu_timing_client_->gpu_timing_);
  Reset();
  time_stamp_result_ = gpu_timing_client_->gpu_timing_->DoTimeStampQuery();
  timer_state_ = kTimerState_WaitingForResult;
}
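
// When not configured as an elapsed-only timer, Start() issues a timestamp
// query to anchor the start time and an elapsed-time query to measure the
// duration; GetStartEndTimestamps() later combines the two.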
void GPUTimer::Start() {
  DCHECK(gpu_timing_client_->gpu_timing_);
  Reset();
  if (!use_elapsed_timer_)
    time_stamp_result_ = gpu_timing_client_->gpu_timing_->DoTimeStampQuery();
  elapsed_timer_result_ =
      gpu_timing_client_->gpu_timing_->BeginElapsedTimeQuery();
  timer_state_ = kTimerState_WaitingForEnd;
}

void GPUTimer::End() {
  DCHECK(timer_state_ == kTimerState_WaitingForEnd);
  DCHECK(elapsed_timer_result_.get());
  gpu_timing_client_->gpu_timing_->EndElapsedTimeQuery(elapsed_timer_result_);
  timer_state_ = kTimerState_WaitingForResult;
}

bool GPUTimer::IsAvailable() {
  if (timer_state_ == kTimerState_WaitingForResult) {
    // Elapsed timers are only used during start/end queries and always after
    // the timestamp query. Otherwise only the timestamp is used.
    scoped_refptr<QueryResult> result =
        elapsed_timer_result_.get() ? elapsed_timer_result_
                                    : time_stamp_result_;
    DCHECK(result.get());
    if (result->IsAvailable()) {
      timer_state_ = kTimerState_ResultAvailable;
    } else {
      gpu_timing_client_->gpu_timing_->UpdateQueryResults();
      if (result->IsAvailable())
        timer_state_ = kTimerState_ResultAvailable;
    }
  }

  return (timer_state_ == kTimerState_ResultAvailable);
}

void GPUTimer::GetStartEndTimestamps(int64_t* start, int64_t* end) {
  DCHECK(start && end);
  DCHECK(elapsed_timer_result_.get() || time_stamp_result_.get());
  DCHECK(IsAvailable());
  const int64_t time_stamp = time_stamp_result_.get()
                                 ? time_stamp_result_->GetStartValue()
                                 : elapsed_timer_result_->GetStartValue();
  const int64_t elapsed_time =
      elapsed_timer_result_.get() ? elapsed_timer_result_->GetDelta() : 0;

  *start = time_stamp;
  *end = time_stamp + elapsed_time;
}

int64_t GPUTimer::GetDeltaElapsed() {
  DCHECK(IsAvailable());
  if (elapsed_timer_result_.get())
    return elapsed_timer_result_->GetDelta();
  return 0;
}

GPUTimer::GPUTimer(scoped_refptr<GPUTimingClient> gpu_timing_client,
                   bool use_elapsed_timer)
    : use_elapsed_timer_(use_elapsed_timer),
      gpu_timing_client_(gpu_timing_client) {}

GPUTimingClient::GPUTimingClient(GPUTimingImpl* gpu_timing)
    : gpu_timing_(gpu_timing) {
  if (gpu_timing) {
    timer_type_ = gpu_timing->GetTimerType();
    disjoint_counter_ = gpu_timing_->GetDisjointCount();
  }
}

std::unique_ptr<GPUTimer> GPUTimingClient::CreateGPUTimer(
    bool prefer_elapsed_time) {
  prefer_elapsed_time |= (timer_type_ == GPUTiming::kTimerTypeEXT);
  if (gpu_timing_)
    prefer_elapsed_time |= gpu_timing_->IsForceTimeElapsedQuery();
  return base::WrapUnique(new GPUTimer(this, prefer_elapsed_time));
}

bool GPUTimingClient::IsAvailable() {
  return timer_type_ != GPUTiming::kTimerTypeInvalid;
}

const char* GPUTimingClient::GetTimerTypeName() const {
  switch (timer_type_) {
    case GPUTiming::kTimerTypeDisjoint:
      return "GL_EXT_disjoint_timer_query";
    case GPUTiming::kTimerTypeARB:
      return "GL_ARB_timer_query";
    case GPUTiming::kTimerTypeEXT:
      return "GL_EXT_timer_query";
    default:
      return "Unknown";
  }
}
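
// Returns true if a disjoint event (or an artificial one raised by
// HandleBadQuery()) occurred since the last call, in which case timer results
// collected in the interval may be unreliable. Only meaningful for the
// disjoint timer type; otherwise always returns false.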
bool GPUTimingClient::CheckAndResetTimerErrors() {
  if (timer_type_ == GPUTiming::kTimerTypeDisjoint) {
    DCHECK(gpu_timing_ != nullptr);
    const uint32_t total_disjoint_count = gpu_timing_->GetDisjointCount();
    const bool disjoint_triggered = total_disjoint_count != disjoint_counter_;
    disjoint_counter_ = total_disjoint_count;
    return disjoint_triggered;
  }
  return false;
}

int64_t GPUTimingClient::GetCurrentCPUTime() {
  DCHECK(gpu_timing_);
  return gpu_timing_->GetCurrentCPUTime();
}

void GPUTimingClient::SetCpuTimeForTesting(
    base::RepeatingCallback<int64_t(void)> cpu_time) {
  DCHECK(gpu_timing_);
  gpu_timing_->SetCpuTimeForTesting(std::move(cpu_time));
}

bool GPUTimingClient::IsForceTimeElapsedQuery() {
  DCHECK(gpu_timing_);
  return gpu_timing_->IsForceTimeElapsedQuery();
}

void GPUTimingClient::ForceTimeElapsedQuery() {
  DCHECK(gpu_timing_);
  gpu_timing_->ForceTimeElapsedQuery();
}

GPUTimingClient::~GPUTimingClient() {}

}  // namespace gl