Diffstat (limited to 'src/traced/probes/ftrace/ftrace_controller.cc')
-rw-r--r--  src/traced/probes/ftrace/ftrace_controller.cc  97
1 file changed, 76 insertions(+), 21 deletions(-)
diff --git a/src/traced/probes/ftrace/ftrace_controller.cc b/src/traced/probes/ftrace/ftrace_controller.cc
index 53ac1e58f..fc25d3b69 100644
--- a/src/traced/probes/ftrace/ftrace_controller.cc
+++ b/src/traced/probes/ftrace/ftrace_controller.cc
@@ -33,6 +33,7 @@
#include "perfetto/base/time.h"
#include "perfetto/ext/base/file_utils.h"
#include "perfetto/ext/base/metatrace.h"
+#include "perfetto/ext/base/string_utils.h"
#include "perfetto/ext/tracing/core/trace_writer.h"
#include "src/kallsyms/kernel_symbol_map.h"
#include "src/kallsyms/lazy_kernel_symbolizer.h"
@@ -84,15 +85,31 @@ uint32_t ClampDrainPeriodMs(uint32_t drain_period_ms) {
return drain_period_ms;
}
-void WriteToFile(const char* path, const char* str) {
+bool WriteToFile(const char* path, const char* str) {
auto fd = base::OpenFile(path, O_WRONLY);
if (!fd)
- return;
- base::ignore_result(base::WriteAll(*fd, str, strlen(str)));
+ return false;
+ const size_t str_len = strlen(str);
+ return base::WriteAll(*fd, str, str_len) == static_cast<ssize_t>(str_len);
}
-void ClearFile(const char* path) {
+bool ClearFile(const char* path) {
auto fd = base::OpenFile(path, O_WRONLY | O_TRUNC);
+ return !!fd;
+}
+
+base::Optional<int64_t> ReadFtraceNowTs(const base::ScopedFile& cpu_stats_fd) {
+ PERFETTO_CHECK(cpu_stats_fd);
+
+ char buf[512];
+ ssize_t res = PERFETTO_EINTR(pread(*cpu_stats_fd, buf, sizeof(buf) - 1, 0));
+ if (res <= 0)
+ return base::nullopt;
+ buf[res] = '\0';
+
+ FtraceCpuStats stats{};
+ DumpCpuStats(buf, &stats);
+ return static_cast<int64_t>(stats.now_ts * 1000 * 1000 * 1000);
}
} // namespace
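
ReadFtraceNowTs() relies on DumpCpuStats() to parse the per-CPU stats file and then converts the "now ts" value from seconds to nanoseconds. The standalone sketch below only illustrates that conversion; the helper name and the exact "now ts:" line format are assumptions for illustration, not part of this patch.

// Hypothetical sketch (not part of the patch): pull "now ts: <sec>.<usec>"
// out of a per_cpu stats dump and convert it to nanoseconds, mirroring the
// seconds-to-ns multiplication done in ReadFtraceNowTs() above.
#include <cstdint>
#include <cstdio>
#include <cstring>

int64_t ParseNowTsNs(const char* stats_text) {
  const char* line = strstr(stats_text, "now ts:");
  if (!line)
    return 0;
  double now_sec = 0;
  if (sscanf(line, "now ts: %lf", &now_sec) != 1)
    return 0;
  return static_cast<int64_t>(now_sec * 1000 * 1000 * 1000);
}

// Example: ParseNowTsNs("now ts: 6565.135510\n") yields roughly
// 6565135510000 ns.
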
@@ -100,18 +117,21 @@ void ClearFile(const char* path) {
// Method of last resort to reset ftrace state.
// We don't know what state the rest of the system and process is in, so
// avoid allocations as far as possible.
-void HardResetFtraceState() {
- PERFETTO_LOG("Hard resetting ftrace state.");
-
- WriteToFile("/sys/kernel/debug/tracing/tracing_on", "0");
- WriteToFile("/sys/kernel/debug/tracing/buffer_size_kb", "4");
- WriteToFile("/sys/kernel/debug/tracing/events/enable", "0");
- ClearFile("/sys/kernel/debug/tracing/trace");
-
- WriteToFile("/sys/kernel/tracing/tracing_on", "0");
- WriteToFile("/sys/kernel/tracing/buffer_size_kb", "4");
- WriteToFile("/sys/kernel/tracing/events/enable", "0");
- ClearFile("/sys/kernel/tracing/trace");
+bool HardResetFtraceState() {
+ for (const char* const* item = FtraceProcfs::kTracingPaths; *item; ++item) {
+ std::string prefix(*item);
+ PERFETTO_CHECK(base::EndsWith(prefix, "/"));
+ bool res = true;
+ res &= WriteToFile((prefix + "tracing_on").c_str(), "0");
+ res &= WriteToFile((prefix + "buffer_size_kb").c_str(), "4");
+ // We deliberately don't check the result of this write, because on some
+ // older versions of Android events/enable was not writable by the shell user.
+ WriteToFile((prefix + "events/enable").c_str(), "0");
+ res &= ClearFile((prefix + "trace").c_str());
+ if (res)
+ return true;
+ }
+ return false;
}
// static
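
HardResetFtraceState() now walks FtraceProcfs::kTracingPaths instead of hard-coding both tracefs mount points, and it returns true as soon as one instance resets cleanly. For reference, a null-terminated table in the shape the loop expects might look like the sketch below; the entries are inferred from the hard-coded paths this patch removes, and the authoritative list lives in FtraceProcfs.

// Illustrative only: a null-terminated prefix table shaped like the one the
// reset loop iterates. Entries are inferred from the removed hard-coded
// paths; the real list is FtraceProcfs::kTracingPaths.
const char* const kTracingPathsSketch[] = {
    "/sys/kernel/tracing/",        // tracefs mount point
    "/sys/kernel/debug/tracing/",  // legacy debugfs mount point
    nullptr,
};

// Usage mirrors the loop above:
//   for (const char* const* it = kTracingPathsSketch; *it; ++it) { ... }
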
@@ -152,6 +172,7 @@ FtraceController::FtraceController(std::unique_ptr<FtraceProcfs> ftrace_procfs,
ftrace_procfs_(std::move(ftrace_procfs)),
table_(std::move(table)),
ftrace_config_muxer_(std::move(model)),
+ ftrace_clock_snapshot_(new FtraceClockSnapshot()),
weak_factory_(this) {}
FtraceController::~FtraceController() {
@@ -167,6 +188,7 @@ uint64_t FtraceController::NowMs() const {
}
void FtraceController::StartIfNeeded() {
+ using FtraceClock = protos::pbzero::FtraceClock;
if (started_data_sources_.size() > 1)
return;
PERFETTO_DCHECK(!started_data_sources_.empty());
@@ -178,13 +200,20 @@ void FtraceController::StartIfNeeded() {
base::PagedMemory::Allocate(base::kPageSize * kParsingBufferSizePages);
}
+ // If we're not using the boot clock, snapshot the ftrace clock.
+ FtraceClock clock = ftrace_config_muxer_->ftrace_clock();
+ if (clock != FtraceClock::FTRACE_CLOCK_UNSPECIFIED) {
+ cpu_zero_stats_fd_ = ftrace_procfs_->OpenCpuStats(0 /* cpu */);
+ MaybeSnapshotFtraceClock();
+ }
+
per_cpu_.clear();
per_cpu_.reserve(ftrace_procfs_->NumberOfCpus());
size_t period_page_quota = ftrace_config_muxer_->GetPerCpuBufferSizePages();
for (size_t cpu = 0; cpu < ftrace_procfs_->NumberOfCpus(); cpu++) {
- auto reader = std::unique_ptr<CpuReader>(
- new CpuReader(cpu, table_.get(), symbolizer_.get(),
- ftrace_procfs_->OpenPipeForCpu(cpu)));
+ auto reader = std::unique_ptr<CpuReader>(new CpuReader(
+ cpu, table_.get(), symbolizer_.get(), ftrace_clock_snapshot_.get(),
+ ftrace_procfs_->OpenPipeForCpu(cpu)));
per_cpu_.emplace_back(std::move(reader), period_page_quota);
}
@@ -243,13 +272,16 @@ void FtraceController::ReadTick(int generation) {
// Read all cpu buffers with remaining per-period quota.
bool all_cpus_done = true;
uint8_t* parsing_buf = reinterpret_cast<uint8_t*>(parsing_mem_.Get());
+ const auto ftrace_clock = ftrace_config_muxer_->ftrace_clock();
for (size_t i = 0; i < per_cpu_.size(); i++) {
size_t orig_quota = per_cpu_[i].period_page_quota;
if (orig_quota == 0)
continue;
size_t max_pages = std::min(orig_quota, kMaxPagesPerCpuPerReadTick);
- size_t pages_read = per_cpu_[i].reader->ReadCycle(
+ CpuReader& cpu_reader = *per_cpu_[i].reader;
+ cpu_reader.set_ftrace_clock(ftrace_clock);
+ size_t pages_read = cpu_reader.ReadCycle(
parsing_buf, kParsingBufferSizePages, max_pages, started_data_sources_);
size_t new_quota = (pages_read >= orig_quota) ? 0 : orig_quota - pages_read;
@@ -282,6 +314,10 @@ void FtraceController::ReadTick(int generation) {
for (auto& per_cpu : per_cpu_)
per_cpu.period_page_quota = period_page_quota;
+ // Snapshot the clock so the data in the next period will be clock synced as
+ // well.
+ MaybeSnapshotFtraceClock();
+
auto drain_period_ms = GetDrainPeriodMs();
task_runner_->PostDelayedTask(
[weak_this, generation] {
@@ -346,6 +382,7 @@ void FtraceController::StopIfNeeded() {
per_cpu_.clear();
symbolizer_->Destroy();
+ cpu_zero_stats_fd_.reset();
if (parsing_mem_.IsValid()) {
parsing_mem_.AdviseDontNeed(parsing_mem_.Get(), parsing_mem_.size());
@@ -356,7 +393,8 @@ bool FtraceController::AddDataSource(FtraceDataSource* data_source) {
if (!ValidConfig(data_source->config()))
return false;
- auto config_id = ftrace_config_muxer_->SetupConfig(data_source->config());
+ auto config_id = ftrace_config_muxer_->SetupConfig(
+ data_source->config(), data_source->mutable_setup_errors());
if (!config_id)
return false;
@@ -419,6 +457,23 @@ void FtraceController::DumpFtraceStats(FtraceStats* stats) {
}
}
+void FtraceController::MaybeSnapshotFtraceClock() {
+ if (!cpu_zero_stats_fd_)
+ return;
+
+ auto ftrace_clock = ftrace_config_muxer_->ftrace_clock();
+ PERFETTO_DCHECK(ftrace_clock != protos::pbzero::FTRACE_CLOCK_UNSPECIFIED);
+
+ // Snapshot the boot clock *before* reading CPU stats so that the two clocks
+ // are as close together as possible (i.e. if it were the other way round,
+ // we'd skew by the cost of string parsing).
+ ftrace_clock_snapshot_->boot_clock_ts = base::GetBootTimeNs().count();
+
+ // A value of zero will cause this snapshot to be skipped.
+ ftrace_clock_snapshot_->ftrace_clock_ts =
+ ReadFtraceNowTs(cpu_zero_stats_fd_).value_or(0);
+}
+
FtraceController::Observer::~Observer() = default;
} // namespace perfetto
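
The snapshot pair captured in MaybeSnapshotFtraceClock() gives downstream code one (boot clock, ftrace clock) timestamp pair observed at roughly the same instant. How CpuReader actually consumes FtraceClockSnapshot is outside this diff; as a rough illustration only, the offset arithmetic for re-expressing an event timestamp in the boot clock domain could look like the hypothetical helper below.

// Hypothetical illustration (not part of this patch): translate an event
// timestamp taken on the selected ftrace clock into the boot clock domain
// using one snapshot pair taken at (roughly) the same instant.
#include <cstdint>

int64_t FtraceToBootNs(int64_t event_ts_ns,
                       int64_t snapshot_ftrace_ts_ns,
                       int64_t snapshot_boot_ts_ns) {
  // Assumes both clocks tick at the same rate and differ only by an offset.
  return event_ts_ns + (snapshot_boot_ts_ns - snapshot_ftrace_ts_ns);
}
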