aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLalit Maganti <lalitm@google.com>2024-04-26 15:33:19 +0100
committerLalit Maganti <lalitm@google.com>2024-04-26 15:33:19 +0100
commit1eae2b1826d48b2a286e371d15fb51ede1c6eae8 (patch)
treeb5c285a52ce750b0f971a4fd14d4df94d93163aa
parent6e0ff97cfca7698faf1b029b1cf2082540087516 (diff)
downloadperfetto-1eae2b1826d48b2a286e371d15fb51ede1c6eae8.tar.gz
ui: utilise segment forest to improve slice tracks on large traces
This CL introduces the use of the segment forest to significantly speed up the queries of slice tracks on large traces. Specifically the data structure is exposed to Typescript via the use of a new "SliceMipmap" trace processor operator. This CL only ports base_slice_track to the operator: other tracks will be ported in followup CLs. Change-Id: Icf5d5a620b01dece7b102f9bdae181db0ee145fc
-rw-r--r--Android.bp1
-rw-r--r--BUILD2
-rw-r--r--src/trace_processor/containers/implicit_segment_forest.h3
-rw-r--r--src/trace_processor/perfetto_sql/intrinsics/operators/BUILD.gn2
-rw-r--r--src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.cc275
-rw-r--r--src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h128
-rw-r--r--src/trace_processor/trace_processor_impl.cc4
-rw-r--r--ui/src/frontend/base_slice_track.ts212
-rw-r--r--ui/src/frontend/base_slice_track_unittest.ts16
-rw-r--r--ui/src/public/index.ts6
-rw-r--r--ui/src/tracks/chrome_scroll_jank/event_latency_track.ts4
-rw-r--r--ui/src/tracks/chrome_scroll_jank/scroll_jank_v3_track.ts4
-rw-r--r--ui/src/tracks/chrome_scroll_jank/scroll_track.ts4
13 files changed, 510 insertions, 151 deletions
diff --git a/Android.bp b/Android.bp
index 6a37b9211..e435f9902 100644
--- a/Android.bp
+++ b/Android.bp
@@ -12291,6 +12291,7 @@ filegroup {
name: "perfetto_src_trace_processor_perfetto_sql_intrinsics_operators_operators",
srcs: [
"src/trace_processor/perfetto_sql/intrinsics/operators/counter_mipmap_operator.cc",
+ "src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.cc",
"src/trace_processor/perfetto_sql/intrinsics/operators/span_join_operator.cc",
"src/trace_processor/perfetto_sql/intrinsics/operators/window_operator.cc",
],
diff --git a/BUILD b/BUILD
index 1b4778ec5..c6408f179 100644
--- a/BUILD
+++ b/BUILD
@@ -2316,6 +2316,8 @@ perfetto_filegroup(
srcs = [
"src/trace_processor/perfetto_sql/intrinsics/operators/counter_mipmap_operator.cc",
"src/trace_processor/perfetto_sql/intrinsics/operators/counter_mipmap_operator.h",
+ "src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.cc",
+ "src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h",
"src/trace_processor/perfetto_sql/intrinsics/operators/span_join_operator.cc",
"src/trace_processor/perfetto_sql/intrinsics/operators/span_join_operator.h",
"src/trace_processor/perfetto_sql/intrinsics/operators/window_operator.cc",
diff --git a/src/trace_processor/containers/implicit_segment_forest.h b/src/trace_processor/containers/implicit_segment_forest.h
index f69918197..5b58cb485 100644
--- a/src/trace_processor/containers/implicit_segment_forest.h
+++ b/src/trace_processor/containers/implicit_segment_forest.h
@@ -114,6 +114,9 @@ class ImplicitSegmentForest {
// element |Push|-ed into the tree.
const T& operator[](uint32_t n) { return values_[n * 2]; }
+ // Returns the number of elements pushed into the forest.
+ uint32_t size() const { return static_cast<uint32_t>(values_.size() / 2); }
+
private:
static uint32_t Lsp(uint32_t x) { return x & -x; }
static uint32_t Msp(uint32_t x) {
diff --git a/src/trace_processor/perfetto_sql/intrinsics/operators/BUILD.gn b/src/trace_processor/perfetto_sql/intrinsics/operators/BUILD.gn
index 2dca8b627..49a4a81ad 100644
--- a/src/trace_processor/perfetto_sql/intrinsics/operators/BUILD.gn
+++ b/src/trace_processor/perfetto_sql/intrinsics/operators/BUILD.gn
@@ -20,6 +20,8 @@ source_set("operators") {
sources = [
"counter_mipmap_operator.cc",
"counter_mipmap_operator.h",
+ "slice_mipmap_operator.cc",
+ "slice_mipmap_operator.h",
"span_join_operator.cc",
"span_join_operator.h",
"window_operator.cc",
diff --git a/src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.cc b/src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.cc
new file mode 100644
index 000000000..6fe695640
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h"
+
+#include <sqlite3.h>
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "perfetto/base/logging.h"
+#include "perfetto/base/status.h"
+#include "perfetto/ext/base/status_or.h"
+#include "perfetto/public/compiler.h"
+#include "src/trace_processor/containers/implicit_segment_forest.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_result.h"
+#include "src/trace_processor/sqlite/module_lifecycle_manager.h"
+#include "src/trace_processor/sqlite/sql_source.h"
+#include "src/trace_processor/sqlite/sqlite_utils.h"
+
+namespace perfetto::trace_processor {
+namespace {
+
+// Schema of the virtual table. The in_window_* columns are HIDDEN: they act
+// as "function arguments" when the table is queried with the
+// slice_mipmap(start, end, step) table-valued syntax, not as output columns.
+constexpr char kSliceSchema[] = R"(
+  CREATE TABLE x(
+    in_window_start BIGINT HIDDEN,
+    in_window_end BIGINT HIDDEN,
+    in_window_step BIGINT HIDDEN,
+    ts BIGINT,
+    id BIGINT,
+    dur BIGINT,
+    depth INTEGER,
+    PRIMARY KEY(id)
+  ) WITHOUT ROWID
+)";
+
+// Column indices; must match the column order in |kSliceSchema|.
+enum ColumnIndex : size_t {
+  kInWindowStart = 0,
+  kInWindowEnd,
+  kInWindowStep,
+
+  kTs,
+  kId,
+  kDur,
+  kDepth,
+};
+
+// Number of hidden "argument" columns (window start, end and step).
+constexpr size_t kArgCount = kInWindowStep + 1;
+
+// Returns whether |index| refers to one of the hidden argument columns.
+bool IsArgColumn(size_t index) {
+  return index < kArgCount;
+}
+
+} // namespace
+
+// xCreate: builds the per-depth mipmap index over the slices produced by the
+// SQL given as the module argument. Fix vs. previous revision: the error
+// messages said "zoom_index_operator" (a copy-paste leftover); they now use
+// the "slice_mipmap" prefix, consistent with the error emitted in Filter()
+// and with the registered module name (__intrinsic_slice_mipmap).
+int SliceMipmapOperator::Create(sqlite3* db,
+                                void* raw_ctx,
+                                int argc,
+                                const char* const* argv,
+                                sqlite3_vtab** vtab,
+                                char** zErr) {
+  // Exactly one user argument (the input query) on top of the three
+  // standard module/db/table name arguments.
+  if (argc != 4) {
+    *zErr = sqlite3_mprintf("slice_mipmap: wrong number of arguments");
+    return SQLITE_ERROR;
+  }
+
+  if (int ret = sqlite3_declare_vtab(db, kSliceSchema); ret != SQLITE_OK) {
+    return ret;
+  }
+
+  auto* ctx = GetContext(raw_ctx);
+  auto state = std::make_unique<State>();
+
+  // Materialize the input query and, for every depth, build a segment forest
+  // over (dur, id) plus a parallel vector of start timestamps.
+  std::string sql = "SELECT * FROM ";
+  sql.append(argv[3]);
+  auto res = ctx->engine->ExecuteUntilLastStatement(
+      SqlSource::FromTraceProcessorImplementation(std::move(sql)));
+  if (!res.ok()) {
+    *zErr = sqlite3_mprintf("slice_mipmap: %s", res.status().c_message());
+    return SQLITE_ERROR;
+  }
+  do {
+    // Input columns are read in (id, ts, dur, depth) order.
+    auto id =
+        static_cast<uint32_t>(sqlite3_column_int64(res->stmt.sqlite_stmt(), 0));
+    int64_t ts = sqlite3_column_int64(res->stmt.sqlite_stmt(), 1);
+    int64_t dur = sqlite3_column_int64(res->stmt.sqlite_stmt(), 2);
+    auto depth =
+        static_cast<uint32_t>(sqlite3_column_int64(res->stmt.sqlite_stmt(), 3));
+    if (PERFETTO_UNLIKELY(depth >= state->by_depth.size())) {
+      state->by_depth.resize(depth + 1);
+    }
+    auto& by_depth = state->by_depth[depth];
+    // |idx| records the push position so Filter() can recover the timestamp
+    // of the slice returned by a forest query.
+    by_depth.forest.Push(
+        Slice{dur, id, static_cast<uint32_t>(by_depth.forest.size())});
+    by_depth.timestamps.push_back(ts);
+  } while (res->stmt.Step());
+  if (!res->stmt.status().ok()) {
+    *zErr =
+        sqlite3_mprintf("slice_mipmap: %s", res->stmt.status().c_message());
+    return SQLITE_ERROR;
+  }
+
+  std::unique_ptr<Vtab> vtab_res = std::make_unique<Vtab>();
+  vtab_res->state = ctx->manager.OnCreate(argv, std::move(state));
+  *vtab = vtab_res.release();
+  return SQLITE_OK;
+}
+
+// xDestroy: invoked on DROP TABLE. Frees the vtab (via unique_ptr ownership)
+// and tells the state manager to discard the state built in Create().
+int SliceMipmapOperator::Destroy(sqlite3_vtab* vtab) {
+  std::unique_ptr<Vtab> tab(GetVtab(vtab));
+  sqlite::ModuleStateManager<SliceMipmapOperator>::OnDestroy(tab->state);
+  return SQLITE_OK;
+}
+
+// xConnect: re-attaches to an already-created table (e.g. on a new database
+// connection). Unlike Create(), this does not rebuild the index: it only
+// re-declares the schema and looks up the existing per-vtab state.
+int SliceMipmapOperator::Connect(sqlite3* db,
+                                 void* raw_ctx,
+                                 int argc,
+                                 const char* const* argv,
+                                 sqlite3_vtab** vtab,
+                                 char**) {
+  PERFETTO_CHECK(argc == 4);
+  if (int ret = sqlite3_declare_vtab(db, kSliceSchema); ret != SQLITE_OK) {
+    return ret;
+  }
+  auto* ctx = GetContext(raw_ctx);
+  std::unique_ptr<Vtab> res = std::make_unique<Vtab>();
+  res->state = ctx->manager.OnConnect(argv);
+  *vtab = res.release();
+  return SQLITE_OK;
+}
+
+// xDisconnect: counterpart of Connect(). Frees the vtab but, unlike
+// Destroy(), keeps the underlying state alive via the manager.
+int SliceMipmapOperator::Disconnect(sqlite3_vtab* vtab) {
+  std::unique_ptr<Vtab> tab(GetVtab(vtab));
+  sqlite::ModuleStateManager<SliceMipmapOperator>::OnDisconnect(tab->state);
+  return SQLITE_OK;
+}
+
+// xBestIndex: only accepts query plans which provide equality constraints on
+// all three hidden argument columns (start, end, step). Anything else is
+// rejected with SQLITE_CONSTRAINT so SQLite retries with a different plan.
+int SliceMipmapOperator::BestIndex(sqlite3_vtab*, sqlite3_index_info* info) {
+  base::Status status =
+      sqlite::utils::ValidateFunctionArguments(info, kArgCount, IsArgColumn);
+  if (!status.ok()) {
+    // The error is intentionally dropped: SQLITE_CONSTRAINT merely signals
+    // "this plan is unusable", it is not a user-visible failure.
+    return SQLITE_CONSTRAINT;
+  }
+  if (info->nConstraint != kArgCount) {
+    return SQLITE_CONSTRAINT;
+  }
+  return SQLITE_OK;
+}
+
+// xOpen: allocates a fresh cursor; results are populated later in Filter().
+int SliceMipmapOperator::Open(sqlite3_vtab*, sqlite3_vtab_cursor** cursor) {
+  std::unique_ptr<Cursor> c = std::make_unique<Cursor>();
+  *cursor = c.release();
+  return SQLITE_OK;
+}
+
+// xClose: frees the cursor allocated in Open() (unique_ptr takes ownership).
+int SliceMipmapOperator::Close(sqlite3_vtab_cursor* cursor) {
+  std::unique_ptr<Cursor> c(GetCursor(cursor));
+  return SQLITE_OK;
+}
+
+// xFilter: computes, for every depth and every |step|-sized bucket in
+// [start, end), the max-dur slice in that bucket using an O(log n) segment
+// forest query, materializing all results into the cursor up front.
+//
+// Fix vs. previous revision: the "slice before the window overlaps" check
+// read tses[start_idx], which (a) is an out-of-bounds access when
+// start_idx == tses.size() (the preceding clause short-circuits to true and
+// evaluation continues) and (b) inspects the slice at/after |start| rather
+// than the preceding slice the comment describes. It now reads index
+// start_idx - 1, which is always valid under the start_idx != 0 guard.
+int SliceMipmapOperator::Filter(sqlite3_vtab_cursor* cursor,
+                                int,
+                                const char*,
+                                int argc,
+                                sqlite3_value** argv) {
+  auto* c = GetCursor(cursor);
+  auto* t = GetVtab(c->pVtab);
+  auto* state =
+      sqlite::ModuleStateManager<SliceMipmapOperator>::GetState(t->state);
+  PERFETTO_CHECK(argc == kArgCount);
+
+  c->results.clear();
+  c->index = 0;
+
+  int64_t start = sqlite3_value_int64(argv[0]);
+  int64_t end = sqlite3_value_int64(argv[1]);
+  int64_t step = sqlite3_value_int64(argv[2]);
+
+  if (start == end) {
+    return sqlite::utils::SetError(t, "slice_mipmap: empty range provided");
+  }
+
+  for (uint32_t depth = 0; depth < state->by_depth.size(); ++depth) {
+    auto& by_depth = state->by_depth[depth];
+    const auto& tses = by_depth.timestamps;
+
+    // Index of the first slice starting at or after |start|.
+    auto start_idx = static_cast<uint32_t>(std::distance(
+        tses.begin(), std::lower_bound(tses.begin(), tses.end(), start)));
+
+    // If the slice before this window overlaps with the current window, move
+    // the iterator back one to consider it as well.
+    if (start_idx != 0 &&
+        (static_cast<size_t>(start_idx) == tses.size() ||
+         tses[start_idx] != start) &&
+        (tses[start_idx - 1] + by_depth.forest[start_idx - 1].dur > start)) {
+      --start_idx;
+    }
+
+    for (int64_t s = start; s < end; s += step) {
+      // End (exclusive) of the slice range covered by this bucket; the
+      // search resumes from |start_idx| since the timestamps are sorted.
+      auto end_idx = static_cast<uint32_t>(std::distance(
+          tses.begin(),
+          std::lower_bound(tses.begin() + static_cast<int64_t>(start_idx),
+                           tses.end(), s + step)));
+      if (start_idx == end_idx) {
+        continue;  // No slices in this bucket.
+      }
+      auto res = by_depth.forest.Query(start_idx, end_idx);
+      c->results.emplace_back(Cursor::Result{
+          tses[res.idx],
+          res.dur,
+          res.id,
+          depth,
+      });
+      start_idx = end_idx;
+    }
+  }
+  return SQLITE_OK;
+}
+
+// xNext: advances to the next precomputed result row.
+int SliceMipmapOperator::Next(sqlite3_vtab_cursor* cursor) {
+  GetCursor(cursor)->index++;
+  return SQLITE_OK;
+}
+
+// xEof: true once all results materialized by Filter() have been consumed.
+int SliceMipmapOperator::Eof(sqlite3_vtab_cursor* cursor) {
+  auto* c = GetCursor(cursor);
+  return c->index >= c->results.size();
+}
+
+// xColumn: returns one column of the current result row. The hidden argument
+// columns are never requested here (they are consumed by xFilter), so any
+// other index is an error.
+int SliceMipmapOperator::Column(sqlite3_vtab_cursor* cursor,
+                                sqlite3_context* ctx,
+                                int N) {
+  auto* t = GetVtab(cursor->pVtab);
+  auto* c = GetCursor(cursor);
+  switch (N) {
+    case ColumnIndex::kTs:
+      sqlite::result::Long(ctx, c->results[c->index].timestamp);
+      return SQLITE_OK;
+    case ColumnIndex::kId:
+      sqlite::result::Long(ctx, c->results[c->index].id);
+      return SQLITE_OK;
+    case ColumnIndex::kDur:
+      sqlite::result::Long(ctx, c->results[c->index].dur);
+      return SQLITE_OK;
+    case ColumnIndex::kDepth:
+      sqlite::result::Long(ctx, c->results[c->index].depth);
+      return SQLITE_OK;
+    default:
+      return sqlite::utils::SetError(t, "Bad column");
+  }
+  // Unreachable: every switch path returns; this silences GCC's
+  // missing-return warning.
+  PERFETTO_FATAL("For GCC");
+}
+
+// xRowid: unsupported — the table is declared WITHOUT ROWID.
+int SliceMipmapOperator::Rowid(sqlite3_vtab_cursor*, sqlite_int64*) {
+  return SQLITE_ERROR;
+}
+
+} // namespace perfetto::trace_processor
diff --git a/src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h b/src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h
new file mode 100644
index 000000000..329122582
--- /dev/null
+++ b/src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_OPERATORS_SLICE_MIPMAP_OPERATOR_H_
+#define SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_OPERATORS_SLICE_MIPMAP_OPERATOR_H_
+
+#include <sqlite3.h>
+#include <cstdint>
+#include <vector>
+
+#include "src/trace_processor/containers/implicit_segment_forest.h"
+#include "src/trace_processor/perfetto_sql/engine/perfetto_sql_engine.h"
+#include "src/trace_processor/sqlite/bindings/sqlite_module.h"
+#include "src/trace_processor/sqlite/module_lifecycle_manager.h"
+
+namespace perfetto::trace_processor {
+
+// Operator for building "mipmaps" [1] over the slices in the trace.
+//
+// In this context mipmap really means aggregating the slices in a given
+// time period by max(dur) for that period, allowing UIs to efficiently display
+// the contents of slice tracks when very zoomed out.
+//
+// Specifically, we are computing the query:
+// ```
+// select
+// depth,
+// max(dur) as dur,
+// id,
+// ts
+// from $input in
+// where in.ts_end >= $window_start and in.ts <= $window_end
+// group by depth, ts / $window_resolution
+// order by ts
+// ```
+// but in O(logn) time by using a segment-tree like data structure (see
+// ImplicitSegmentForest).
+//
+// [1] https://en.wikipedia.org/wiki/Mipmap
+struct SliceMipmapOperator : sqlite::Module<SliceMipmapOperator> {
+  // One slice as stored in the forest: duration, trace-wide slice id and the
+  // position it was pushed at (used to recover its timestamp from the
+  // parallel |PerDepth::timestamps| vector).
+  struct Slice {
+    int64_t dur;
+    uint32_t id;
+    uint32_t idx;
+  };
+  // Aggregation operator for the segment forest: keeps the longer of two
+  // slices, implementing the max(dur) semantics described above.
+  struct Agg {
+    Slice operator()(const Slice& a, const Slice& b) {
+      return a.dur < b.dur ? b : a;
+    }
+  };
+  // Per-depth index: a segment forest over the slices plus their start
+  // timestamps, kept in a parallel vector for binary search.
+  struct PerDepth {
+    ImplicitSegmentForest<Slice, Agg> forest;
+    std::vector<int64_t> timestamps;
+  };
+  // Table-wide state built once in xCreate, shared across connections.
+  struct State {
+    std::vector<PerDepth> by_depth;
+  };
+  // Module-wide context passed at registration time.
+  struct Context {
+    explicit Context(PerfettoSqlEngine* _engine) : engine(_engine) {}
+    PerfettoSqlEngine* engine;
+    sqlite::ModuleStateManager<SliceMipmapOperator> manager;
+  };
+  struct Vtab : sqlite::Module<SliceMipmapOperator>::Vtab {
+    sqlite::ModuleStateManager<SliceMipmapOperator>::PerVtabState* state;
+  };
+  struct Cursor : sqlite::Module<SliceMipmapOperator>::Cursor {
+    // One output row, fully materialized by xFilter.
+    struct Result {
+      int64_t timestamp;
+      int64_t dur;
+      uint32_t id;
+      uint32_t depth;
+    };
+    std::vector<Result> results;
+    // Position of the current row within |results|.
+    uint32_t index = 0;
+  };
+
+  static constexpr auto kType = kCreateOnly;
+  static constexpr bool kSupportsWrites = false;
+  static constexpr bool kDoesOverloadFunctions = false;
+
+  static int Create(sqlite3*,
+                    void*,
+                    int,
+                    const char* const*,
+                    sqlite3_vtab**,
+                    char**);
+  static int Destroy(sqlite3_vtab*);
+
+  static int Connect(sqlite3*,
+                     void*,
+                     int,
+                     const char* const*,
+                     sqlite3_vtab**,
+                     char**);
+  static int Disconnect(sqlite3_vtab*);
+
+  static int BestIndex(sqlite3_vtab*, sqlite3_index_info*);
+
+  static int Open(sqlite3_vtab*, sqlite3_vtab_cursor**);
+  static int Close(sqlite3_vtab_cursor*);
+
+  static int Filter(sqlite3_vtab_cursor*,
+                    int,
+                    const char*,
+                    int,
+                    sqlite3_value**);
+  static int Next(sqlite3_vtab_cursor*);
+  static int Eof(sqlite3_vtab_cursor*);
+  static int Column(sqlite3_vtab_cursor*, sqlite3_context*, int);
+  static int Rowid(sqlite3_vtab_cursor*, sqlite_int64*);
+};
+
+} // namespace perfetto::trace_processor
+
+#endif // SRC_TRACE_PROCESSOR_PERFETTO_SQL_INTRINSICS_OPERATORS_SLICE_MIPMAP_OPERATOR_H_
diff --git a/src/trace_processor/trace_processor_impl.cc b/src/trace_processor/trace_processor_impl.cc
index 2deba7ac7..30c83e67b 100644
--- a/src/trace_processor/trace_processor_impl.cc
+++ b/src/trace_processor/trace_processor_impl.cc
@@ -82,6 +82,7 @@
#include "src/trace_processor/perfetto_sql/intrinsics/functions/utils.h"
#include "src/trace_processor/perfetto_sql/intrinsics/functions/window_functions.h"
#include "src/trace_processor/perfetto_sql/intrinsics/operators/counter_mipmap_operator.h"
+#include "src/trace_processor/perfetto_sql/intrinsics/operators/slice_mipmap_operator.h"
#include "src/trace_processor/perfetto_sql/intrinsics/operators/span_join_operator.h"
#include "src/trace_processor/perfetto_sql/intrinsics/operators/window_operator.h"
#include "src/trace_processor/perfetto_sql/intrinsics/table_functions/ancestor.h"
@@ -751,6 +752,9 @@ void TraceProcessorImpl::InitPerfettoSqlEngine() {
engine_->sqlite_engine()->RegisterVirtualTableModule<CounterMipmapOperator>(
"__intrinsic_counter_mipmap",
std::make_unique<CounterMipmapOperator::Context>(engine_.get()));
+ engine_->sqlite_engine()->RegisterVirtualTableModule<SliceMipmapOperator>(
+ "__intrinsic_slice_mipmap",
+ std::make_unique<SliceMipmapOperator::Context>(engine_.get()));
// Initalize the tables and views in the prelude.
InitializePreludeTablesViews(db);
diff --git a/ui/src/frontend/base_slice_track.ts b/ui/src/frontend/base_slice_track.ts
index ef3b60bf2..44597253c 100644
--- a/ui/src/frontend/base_slice_track.ts
+++ b/ui/src/frontend/base_slice_track.ts
@@ -39,13 +39,8 @@ import {checkerboardExcept} from './checkerboard';
import {globals} from './globals';
import {PanelSize} from './panel';
import {DEFAULT_SLICE_LAYOUT, SliceLayout} from './slice_layout';
-import {constraintsToQuerySuffix} from './sql_utils';
import {NewTrackArgs} from './track';
-import {
- BUCKETS_PER_PIXEL,
- CacheKey,
- TimelineCache,
-} from '../core/timeline_cache';
+import {BUCKETS_PER_PIXEL, CacheKey} from '../core/timeline_cache';
// The common class that underpins all tracks drawing slices.
@@ -125,13 +120,11 @@ function filterVisibleSlices<S extends Slice>(
// to the right).
// Since the slices are sorted by startS we can check this easily:
const maybeFirstSlice: S | undefined = slices[0];
- if (exists(maybeFirstSlice) && maybeFirstSlice.startNsQ > end) {
+ if (exists(maybeFirstSlice) && maybeFirstSlice.startNs > end) {
return [];
}
- return slices.filter(
- (slice) => slice.startNsQ <= end && slice.endNsQ >= start,
- );
+ return slices.filter((slice) => slice.startNs <= end && slice.endNs >= start);
}
export const filterVisibleSlicesForTesting = filterVisibleSlices;
@@ -148,10 +141,6 @@ export const BASE_ROW = {
ts: LONG, // Start time in nanoseconds.
dur: LONG, // Duration in nanoseconds. -1 = incomplete, 0 = instant.
depth: NUM, // Vertical depth.
-
- // These are computed by the base class:
- tsq: LONG, // Quantized |ts|. This class owns the quantization logic.
- tsqEnd: LONG, // Quantized |ts+dur|. The end bucket.
};
export type BaseRow = typeof BASE_ROW;
@@ -192,17 +181,11 @@ export abstract class BaseSliceTrack<
// This is the currently 'cached' slices:
private slices = new Array<CastInternal<T['slice']>>();
- // This is the slices cache:
- private cache: TimelineCache<Array<CastInternal<T['slice']>>> =
- new TimelineCache(5);
-
- private hasOneOffData: boolean = false;
// Incomplete slices (dur = -1). Rather than adding a lot of logic to
// the SQL queries to handle this case we materialise them one off
// then unconditionally render them. This should be efficient since
// there are at most |depth| slices.
private incomplete = new Array<CastInternal<T['slice']>>();
- private maxDurNs: duration = 0n;
// The currently selected slice.
// TODO(hjd): We should fetch this from the underlying data rather
@@ -326,6 +309,61 @@ export abstract class BaseSliceTrack<
async onCreate(): Promise<void> {
this.initState = await this.onInit();
+
+ // TODO(hjd): Consider case below:
+ // raw:
+ // 0123456789
+ // [A did not end)
+ // [B ]
+ //
+ //
+ // quantised:
+ // 0123456789
+ // [A did not end)
+ // [ B ]
+ // Does it lead to odd results?
+ const extraCols = this.extraSqlColumns.join(',');
+ let queryRes;
+ if (CROP_INCOMPLETE_SLICE_FLAG.get()) {
+ queryRes = await this.engine.query(`
+ select
+ ${this.depthColumn()},
+ ts,
+ -1 as dur,
+ id
+ ${extraCols ? ',' + extraCols : ''}
+ from (${this.getSqlSource()})
+ where dur = -1;
+ `);
+ } else {
+ queryRes = await this.engine.query(`
+ select
+ ${this.depthColumn()},
+ max(ts) as ts,
+ -1 as dur,
+ id
+ ${extraCols ? ',' + extraCols : ''}
+ from (${this.getSqlSource()})
+ where dur = -1
+ group by 1
+ `);
+ }
+ const incomplete = new Array<CastInternal<T['slice']>>(queryRes.numRows());
+ const it = queryRes.iter(this.getRowSpec());
+ for (let i = 0; it.valid(); it.next(), ++i) {
+ incomplete[i] = this.rowToSliceInternal(it);
+ }
+ this.onUpdatedSlices(incomplete);
+ this.incomplete = incomplete;
+
+ await this.engine.query(`
+ create virtual table slice_${this.trackKey}
+ using __intrinsic_slice_mipmap((
+ select id, ts, dur, ${this.depthColumn()}
+ from (${this.getSqlSource()})
+ where dur != -1
+ ));
+ `);
}
async onUpdate(): Promise<void> {
@@ -394,8 +432,8 @@ export abstract class BaseSliceTrack<
// partially visible. This might end up with a negative x if the
// slice starts before the visible time or with a width that overflows
// pxEnd.
- slice.x = timeScale.timeToPx(slice.startNsQ);
- slice.w = timeScale.durationToPx(slice.durNsQ);
+ slice.x = timeScale.timeToPx(slice.startNs);
+ slice.w = timeScale.durationToPx(slice.durNs);
if (slice.flags & SLICE_FLAGS_INSTANT) {
// In the case of an instant slice, set the slice geometry on the
@@ -611,82 +649,18 @@ export abstract class BaseSliceTrack<
} // if (hoveredSlice)
}
- onDestroy() {
+ async onDestroy(): Promise<void> {
if (this.initState) {
this.initState.dispose();
this.initState = undefined;
}
+ await this.engine.execute(`drop table slice_${this.trackKey}`);
}
// This method figures out if the visible window is outside the bounds of
// the cached data and if so issues new queries (i.e. sorta subsumes the
// onBoundsChange).
private async maybeRequestData(rawSlicesKey: CacheKey) {
- if (!this.hasOneOffData) {
- // TODO(hjd): This could be done in onInit maybe?
- const queryRes = await this.engine.query(`select
- ifnull(max(dur), 0) as maxDur, count(1) as rowCount
- from (${this.getSqlSource()})`);
- const row = queryRes.firstRow({maxDur: LONG, rowCount: NUM});
- this.maxDurNs = row.maxDur;
-
- {
- // TODO(hjd): Consider case below:
- // raw:
- // 0123456789
- // [A did not end)
- // [B ]
- //
- //
- // quantised:
- // 0123456789
- // [A did not end)
- // [ B ]
- // Does it lead to odd results?
- const extraCols = this.extraSqlColumns.join(',');
- let queryRes;
- if (CROP_INCOMPLETE_SLICE_FLAG.get()) {
- queryRes = await this.engine.query(`
- select
- ${this.depthColumn()},
- ts as tsq,
- ts as tsqEnd,
- ts,
- -1 as dur,
- id
- ${extraCols ? ',' + extraCols : ''}
- from (${this.getSqlSource()})
- where dur = -1;
- `);
- } else {
- queryRes = await this.engine.query(`
- select
- ${this.depthColumn()},
- max(ts) as tsq,
- max(ts) as tsqEnd,
- max(ts) as ts,
- -1 as dur,
- id
- ${extraCols ? ',' + extraCols : ''}
- from (${this.getSqlSource()})
- group by 1
- having dur = -1;
- `);
- }
- const incomplete = new Array<CastInternal<T['slice']>>(
- queryRes.numRows(),
- );
- const it = queryRes.iter(this.getRowSpec());
- for (let i = 0; it.valid(); it.next(), ++i) {
- incomplete[i] = this.rowToSliceInternal(it);
- }
- this.onUpdatedSlices(incomplete);
- this.incomplete = incomplete;
- }
-
- this.hasOneOffData = true;
- }
-
if (rawSlicesKey.isCoveredBy(this.slicesKey)) {
return; // We have the data already, no need to re-query
}
@@ -699,52 +673,23 @@ export abstract class BaseSliceTrack<
);
}
- const maybeCachedSlices = this.cache.lookup(slicesKey);
- if (maybeCachedSlices) {
- this.slicesKey = slicesKey;
- this.onUpdatedSlices(maybeCachedSlices);
- this.slices = maybeCachedSlices;
- return;
- }
-
- const bucketNs = slicesKey.bucketSize;
- let queryTsq;
- let queryTsqEnd;
- // When we're zoomed into the level of single ns there is no point
- // doing quantization (indeed it causes bad artifacts) so instead
- // we use ts / ts+dur directly.
- if (bucketNs === 1n) {
- queryTsq = 'ts';
- queryTsqEnd = 'ts + dur';
- } else {
- queryTsq = `(ts + ${bucketNs / 2n}) / ${bucketNs} * ${bucketNs}`;
- queryTsqEnd = `(ts + dur + ${bucketNs / 2n}) / ${bucketNs} * ${bucketNs}`;
- }
-
const extraCols = this.extraSqlColumns.join(',');
- const maybeDepth = this.isFlat() ? undefined : 'depth';
-
- const constraint = constraintsToQuerySuffix({
- filters: [
- `ts >= ${slicesKey.start - this.maxDurNs}`,
- `ts <= ${slicesKey.end}`,
- ],
- groupBy: [maybeDepth, 'tsq'],
- orderBy: [maybeDepth, 'tsq'],
- });
// TODO(hjd): Count and expose the number of slices summarized in
// each bucket?
const queryRes = await this.engine.query(`
SELECT
- ${queryTsq} AS tsq,
- ${queryTsqEnd} AS tsqEnd,
- ts,
- MAX(dur) AS dur,
- id,
- ${this.depthColumn()}
+ z.ts,
+ iif(s.dur = -1, s.dur, z.dur) as dur,
+ s.id,
+ z.depth
${extraCols ? ',' + extraCols : ''}
- FROM (${this.getSqlSource()}) ${constraint}
+ FROM slice_${this.trackKey}(
+ ${slicesKey.start},
+ ${slicesKey.end},
+ ${slicesKey.bucketSize}
+ ) z
+ CROSS JOIN (${this.getSqlSource()}) s using (id)
`);
// Here convert each row to a Slice. We do what we can do
@@ -768,7 +713,6 @@ export abstract class BaseSliceTrack<
}
this.maxDataDepth = maxDataDepth;
this.onUpdatedSlices(slices);
- this.cache.insert(slicesKey, slices);
this.slices = slices;
raf.scheduleRedraw();
@@ -789,8 +733,8 @@ export abstract class BaseSliceTrack<
}
rowToSlice(row: T['row']): T['slice'] {
- const startNsQ = Time.fromRaw(row.tsq);
- const endNsQ = Time.fromRaw(row.tsqEnd);
+ const startNs = Time.fromRaw(row.ts);
+ const endNs = Time.fromRaw(row.ts + row.dur);
const ts = Time.fromRaw(row.ts);
const dur: duration = row.dur;
@@ -803,9 +747,9 @@ export abstract class BaseSliceTrack<
return {
id: row.id,
- startNsQ,
- endNsQ,
- durNsQ: endNsQ - startNsQ,
+ startNs,
+ endNs,
+ durNs: row.dur,
ts,
dur,
flags,
@@ -846,7 +790,7 @@ export abstract class BaseSliceTrack<
for (const slice of this.incomplete) {
const visibleTimeScale = globals.timeline.visibleTimeScale;
const startPx = CROP_INCOMPLETE_SLICE_FLAG.get()
- ? visibleTimeScale.timeToPx(slice.startNsQ)
+ ? visibleTimeScale.timeToPx(slice.startNs)
: slice.x;
const cropUnfinishedSlicesCondition = CROP_INCOMPLETE_SLICE_FLAG.get()
? startPx + INCOMPLETE_SLICE_WIDTH_PX >= x
diff --git a/ui/src/frontend/base_slice_track_unittest.ts b/ui/src/frontend/base_slice_track_unittest.ts
index 196cc2ea5..4e8fd5182 100644
--- a/ui/src/frontend/base_slice_track_unittest.ts
+++ b/ui/src/frontend/base_slice_track_unittest.ts
@@ -19,16 +19,16 @@ import {Slice} from '../public';
import {filterVisibleSlicesForTesting as filterVisibleSlices} from './base_slice_track';
function slice(start: number, duration: number, depth: number = 0): Slice {
- const startNsQ = Time.fromRaw(BigInt(start));
- const durNsQ = Time.fromRaw(BigInt(duration));
- const endNsQ = Time.fromRaw(startNsQ + durNsQ);
+ const startNs = Time.fromRaw(BigInt(start));
+ const durNs = Time.fromRaw(BigInt(duration));
+ const endNs = Time.fromRaw(startNs + durNs);
return {
id: 42,
- startNsQ,
- endNsQ,
- durNsQ,
- ts: startNsQ,
- dur: durNsQ,
+ startNs,
+ endNs,
+ durNs,
+ ts: startNs,
+ dur: durNs,
depth,
flags: 0,
title: '',
diff --git a/ui/src/public/index.ts b/ui/src/public/index.ts
index a8d29499f..ece6ea0ed 100644
--- a/ui/src/public/index.ts
+++ b/ui/src/public/index.ts
@@ -45,9 +45,9 @@ export interface Slice {
// These properties are updated only once per query result when the Slice
// object is created and don't change afterwards.
readonly id: number;
- readonly startNsQ: time;
- readonly endNsQ: time;
- readonly durNsQ: duration;
+ readonly startNs: time;
+ readonly endNs: time;
+ readonly durNs: duration;
readonly ts: time;
readonly dur: duration;
readonly depth: number;
diff --git a/ui/src/tracks/chrome_scroll_jank/event_latency_track.ts b/ui/src/tracks/chrome_scroll_jank/event_latency_track.ts
index 85a2a703d..11cc6d159 100644
--- a/ui/src/tracks/chrome_scroll_jank/event_latency_track.ts
+++ b/ui/src/tracks/chrome_scroll_jank/event_latency_track.ts
@@ -53,8 +53,8 @@ export class EventLatencyTrack extends CustomSqlTableSliceTrack<EventLatencyTrac
});
}
- onDestroy() {
- super.onDestroy();
+ async onDestroy(): Promise<void> {
+ await super.onDestroy();
ScrollJankPluginState.getInstance().unregisterTrack(EventLatencyTrack.kind);
}
diff --git a/ui/src/tracks/chrome_scroll_jank/scroll_jank_v3_track.ts b/ui/src/tracks/chrome_scroll_jank/scroll_jank_v3_track.ts
index ba44b9b65..fdb5ad1c0 100644
--- a/ui/src/tracks/chrome_scroll_jank/scroll_jank_v3_track.ts
+++ b/ui/src/tracks/chrome_scroll_jank/scroll_jank_v3_track.ts
@@ -76,8 +76,8 @@ export class ScrollJankV3Track extends CustomSqlTableSliceTrack<NamedSliceTrackT
};
}
- onDestroy() {
- super.onDestroy();
+ async onDestroy(): Promise<void> {
+ await super.onDestroy();
ScrollJankPluginState.getInstance().unregisterTrack(ScrollJankV3Track.kind);
}
diff --git a/ui/src/tracks/chrome_scroll_jank/scroll_track.ts b/ui/src/tracks/chrome_scroll_jank/scroll_track.ts
index 2967b97ba..8b475694e 100644
--- a/ui/src/tracks/chrome_scroll_jank/scroll_track.ts
+++ b/ui/src/tracks/chrome_scroll_jank/scroll_track.ts
@@ -62,8 +62,8 @@ export class TopLevelScrollTrack extends CustomSqlTableSliceTrack<NamedSliceTrac
});
}
- onDestroy() {
- super.onDestroy();
+ async onDestroy(): Promise<void> {
+ await super.onDestroy();
ScrollJankPluginState.getInstance().unregisterTrack(
TopLevelScrollTrack.kind,
);