Merge "Client library: make code gc-sections friendly"
diff --git a/Android.bp b/Android.bp
index e60a9b7..0b1651b 100644
--- a/Android.bp
+++ b/Android.bp
@@ -6521,6 +6521,7 @@
"src/tracing/tracing.cc",
"src/tracing/track.cc",
"src/tracing/track_event_category_registry.cc",
+ "src/tracing/track_event_legacy.cc",
"src/tracing/virtual_destructors.cc",
],
}
@@ -6607,6 +6608,7 @@
name: "perfetto_src_tracing_ipc_common",
srcs: [
"src/tracing/ipc/default_socket.cc",
+ "src/tracing/ipc/memfd.cc",
"src/tracing/ipc/posix_shared_memory.cc",
],
}
diff --git a/BUILD b/BUILD
index 5c0cb4b..82d6dba 100644
--- a/BUILD
+++ b/BUILD
@@ -1217,6 +1217,8 @@
name = "src_tracing_ipc_common",
srcs = [
"src/tracing/ipc/default_socket.cc",
+ "src/tracing/ipc/memfd.cc",
+ "src/tracing/ipc/memfd.h",
"src/tracing/ipc/posix_shared_memory.cc",
"src/tracing/ipc/posix_shared_memory.h",
],
@@ -1236,6 +1238,7 @@
"src/tracing/tracing.cc",
"src/tracing/track.cc",
"src/tracing/track_event_category_registry.cc",
+ "src/tracing/track_event_legacy.cc",
"src/tracing/virtual_destructors.cc",
],
)
diff --git a/include/perfetto/ext/base/unix_socket.h b/include/perfetto/ext/base/unix_socket.h
index 07e0f58..df0cad8 100644
--- a/include/perfetto/ext/base/unix_socket.h
+++ b/include/perfetto/ext/base/unix_socket.h
@@ -181,8 +181,6 @@
kListening // After Listen(), until Shutdown().
};
- enum class BlockingMode { kNonBlocking, kBlocking };
-
// Creates a socket and starts listening. If SockFamily::kUnix and
// |socket_name| starts with a '@', an abstract UNIX dmoain socket will be
// created instead of a filesystem-linked UNIX socket (Linux/Android only).
@@ -242,26 +240,16 @@
// EventListener::OnDisconnect() will be called.
// If the socket is not connected, Send() will just return false.
// Does not append a null string terminator to msg in any case.
- //
- // DO NOT PASS kNonBlocking, it is broken.
- bool Send(const void* msg,
- size_t len,
- const int* send_fds,
- size_t num_fds,
- BlockingMode blocking = BlockingMode::kNonBlocking);
+ bool Send(const void* msg, size_t len, const int* send_fds, size_t num_fds);
- inline bool Send(const void* msg,
- size_t len,
- int send_fd = -1,
- BlockingMode blocking = BlockingMode::kNonBlocking) {
+ inline bool Send(const void* msg, size_t len, int send_fd = -1) {
if (send_fd != -1)
- return Send(msg, len, &send_fd, 1, blocking);
- return Send(msg, len, nullptr, 0, blocking);
+ return Send(msg, len, &send_fd, 1);
+ return Send(msg, len, nullptr, 0);
}
- inline bool Send(const std::string& msg,
- BlockingMode blocking = BlockingMode::kNonBlocking) {
- return Send(msg.c_str(), msg.size() + 1, -1, blocking);
+ inline bool Send(const std::string& msg) {
+ return Send(msg.c_str(), msg.size() + 1, -1);
}
// Returns the number of bytes (<= |len|) written in |msg| or 0 if there
diff --git a/include/perfetto/trace_processor/basic_types.h b/include/perfetto/trace_processor/basic_types.h
index ca98d1c..cc41758 100644
--- a/include/perfetto/trace_processor/basic_types.h
+++ b/include/perfetto/trace_processor/basic_types.h
@@ -85,19 +85,19 @@
return value;
}
- double AsDouble() {
+ double AsDouble() const {
PERFETTO_CHECK(type == kDouble);
return double_value;
}
- int64_t AsLong() {
+ int64_t AsLong() const {
PERFETTO_CHECK(type == kLong);
return long_value;
}
- const char* AsString() {
+ const char* AsString() const {
PERFETTO_CHECK(type == kString);
return string_value;
}
- const void* AsBytes() {
+ const void* AsBytes() const {
PERFETTO_CHECK(type == kBytes);
return bytes_value;
}
diff --git a/include/perfetto/tracing/internal/track_event_data_source.h b/include/perfetto/tracing/internal/track_event_data_source.h
index e835155..210514a 100644
--- a/include/perfetto/tracing/internal/track_event_data_source.h
+++ b/include/perfetto/tracing/internal/track_event_data_source.h
@@ -124,6 +124,7 @@
//
// - None
// - Lambda
+ // - Lambda + timestamp
// - One debug annotation
// - Two debug annotations
// - Track
@@ -151,9 +152,26 @@
ArgumentFunction arg_function)
PERFETTO_NO_INLINE {
TraceForCategoryImpl<CategoryIndex>(instances, event_name, type, Track(),
+ TrackEventInternal::GetTimeNs(),
std::move(arg_function));
}
+ // Trace point which takes a lambda function argument and an overridden
+ // timestamp. |timestamp| must be in nanoseconds in the trace clock timebase.
+ template <size_t CategoryIndex,
+ typename ArgumentFunction = void (*)(EventContext),
+ typename ArgumentFunctionCheck = typename std::enable_if<
+ IsValidTraceLambda<ArgumentFunction>()>::type>
+ static void TraceForCategory(uint32_t instances,
+ const char* event_name,
+ perfetto::protos::pbzero::TrackEvent::Type type,
+ uint64_t timestamp,
+ ArgumentFunction arg_function)
+ PERFETTO_NO_INLINE {
+ TraceForCategoryImpl<CategoryIndex>(instances, event_name, type, Track(),
+ timestamp, std::move(arg_function));
+ }
+
// This variant of the inner trace point takes a Track argument which can be
// used to emit events on a non-default track.
template <size_t CategoryIndex,
@@ -182,6 +200,7 @@
ArgumentFunction arg_function)
PERFETTO_NO_INLINE {
TraceForCategoryImpl<CategoryIndex>(instances, event_name, type, track,
+ TrackEventInternal::GetTimeNs(),
std::move(arg_function));
}
@@ -382,6 +401,7 @@
const char* event_name,
perfetto::protos::pbzero::TrackEvent::Type type,
const TrackType& track = Track(),
+ uint64_t timestamp = TrackEventInternal::GetTimeNs(),
ArgumentFunction arg_function = [](EventContext) {
}) PERFETTO_ALWAYS_INLINE {
Base::template TraceWithInstances<CategoryTracePointTraits<CategoryIndex>>(
@@ -390,7 +410,8 @@
// TODO(skyostil): Intern categories at compile time.
auto event_ctx = TrackEventInternal::WriteEvent(
ctx.tls_inst_->trace_writer.get(), ctx.GetIncrementalState(),
- Registry->GetCategory(CategoryIndex)->name, event_name, type);
+ Registry->GetCategory(CategoryIndex)->name, event_name, type,
+ timestamp);
if (track)
event_ctx.event()->set_track_uuid(track.uuid);
arg_function(std::move(event_ctx));
diff --git a/include/perfetto/tracing/internal/track_event_internal.h b/include/perfetto/tracing/internal/track_event_internal.h
index a4ee050..906f777 100644
--- a/include/perfetto/tracing/internal/track_event_internal.h
+++ b/include/perfetto/tracing/internal/track_event_internal.h
@@ -98,7 +98,8 @@
TrackEventIncrementalState*,
const char* category,
const char* name,
- perfetto::protos::pbzero::TrackEvent::Type);
+ perfetto::protos::pbzero::TrackEvent::Type,
+ uint64_t timestamp = GetTimeNs());
template <typename T>
static void AddDebugAnnotation(perfetto::EventContext* event_ctx,
@@ -130,8 +131,10 @@
track, NewTracePacket(trace_writer, GetTimeNs()));
}
- private:
+ // Get the current time in nanoseconds in the trace clock timebase.
static uint64_t GetTimeNs();
+
+ private:
static void ResetIncrementalState(TraceWriterBase*, uint64_t timestamp);
static protozero::MessageHandle<protos::pbzero::TracePacket> NewTracePacket(
TraceWriterBase*,
diff --git a/include/perfetto/tracing/track_event_legacy.h b/include/perfetto/tracing/track_event_legacy.h
index 318b1be..5095335 100644
--- a/include/perfetto/tracing/track_event_legacy.h
+++ b/include/perfetto/tracing/track_event_legacy.h
@@ -31,8 +31,6 @@
#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 0
#endif
-#if PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
-
// Ignore GCC warning about a missing argument for a variadic macro parameter.
#pragma GCC system_header
@@ -113,13 +111,152 @@
static constexpr char TRACE_EVENT_SCOPE_NAME_PROCESS = 'p';
static constexpr char TRACE_EVENT_SCOPE_NAME_THREAD = 't';
+enum PerfettoLegacyCurrentThreadId { TRACE_EVENT_API_CURRENT_THREAD_ID };
+
// ----------------------------------------------------------------------------
// Internal legacy trace point implementation.
// ----------------------------------------------------------------------------
namespace perfetto {
+namespace legacy {
+
+// The following user-provided adaptors are used to serialize user-defined
+// thread id and time types into track events. For full compatibility, the user
+// should also define the following macros appropriately:
+//
+// #define TRACE_TIME_TICKS_NOW() ...
+// #define TRACE_TIME_NOW() ...
+
+// User-provided function to convert an abstract thread id into either a track
+// uuid or a pid/tid override. Return true if the conversion succeeded.
+template <typename T>
+bool ConvertThreadId(const T&,
+ uint64_t* track_uuid_out,
+ int32_t* pid_override_out,
+ int32_t* tid_override_out);
+
+// User-provided function to convert an abstract timestamp into the trace clock
+// timebase in nanoseconds.
+template <typename T>
+uint64_t ConvertTimestampToTraceTimeNs(const T&);
+
+// Built-in implementation for events referring to the current thread.
+template <>
+bool ConvertThreadId(const PerfettoLegacyCurrentThreadId&,
+ uint64_t*,
+ int32_t*,
+ int32_t*);
+
+} // namespace legacy
+
namespace internal {
+// LegacyTraceId encapsulates an ID that can either be an integer or pointer.
+class LegacyTraceId {
+ public:
+ // Can be combined with WithScope.
+ class LocalId {
+ public:
+ explicit LocalId(const void* raw_id)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {}
+ explicit LocalId(uint64_t raw_id) : raw_id_(raw_id) {}
+ uint64_t raw_id() const { return raw_id_; }
+
+ private:
+ uint64_t raw_id_;
+ };
+
+ // Can be combined with WithScope.
+ class GlobalId {
+ public:
+ explicit GlobalId(uint64_t raw_id) : raw_id_(raw_id) {}
+ uint64_t raw_id() const { return raw_id_; }
+
+ private:
+ uint64_t raw_id_;
+ };
+
+ class WithScope {
+ public:
+ WithScope(const char* scope, uint64_t raw_id)
+ : scope_(scope), raw_id_(raw_id) {}
+ WithScope(const char* scope, LocalId local_id)
+ : scope_(scope), raw_id_(local_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+ }
+ WithScope(const char* scope, GlobalId global_id)
+ : scope_(scope), raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
+ WithScope(const char* scope, uint64_t prefix, uint64_t raw_id)
+ : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
+ WithScope(const char* scope, uint64_t prefix, GlobalId global_id)
+ : scope_(scope),
+ has_prefix_(true),
+ prefix_(prefix),
+ raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ uint64_t prefix() const { return prefix_; }
+ uint32_t id_flags() const { return id_flags_; }
+
+ private:
+ const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ uint64_t prefix_;
+ uint64_t raw_id_;
+ uint32_t id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
+ };
+
+ LegacyTraceId(const void* raw_id)
+ : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+ }
+ explicit LegacyTraceId(uint64_t raw_id) : raw_id_(raw_id) {}
+ explicit LegacyTraceId(uint32_t raw_id) : raw_id_(raw_id) {}
+ explicit LegacyTraceId(uint16_t raw_id) : raw_id_(raw_id) {}
+ explicit LegacyTraceId(uint8_t raw_id) : raw_id_(raw_id) {}
+ explicit LegacyTraceId(int64_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit LegacyTraceId(int32_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit LegacyTraceId(int16_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit LegacyTraceId(int8_t raw_id)
+ : raw_id_(static_cast<uint64_t>(raw_id)) {}
+ explicit LegacyTraceId(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
+ }
+ explicit LegacyTraceId(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
+ explicit LegacyTraceId(WithScope scoped_id)
+ : scope_(scoped_id.scope()),
+ has_prefix_(scoped_id.has_prefix()),
+ prefix_(scoped_id.prefix()),
+ raw_id_(scoped_id.raw_id()),
+ id_flags_(scoped_id.id_flags()) {}
+
+ uint64_t raw_id() const { return raw_id_; }
+ const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ uint64_t prefix() const { return prefix_; }
+ uint32_t id_flags() const { return id_flags_; }
+
+ void Write(protos::pbzero::TrackEvent::LegacyEvent*,
+ uint32_t event_flags) const;
+
+ private:
+ const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ uint64_t prefix_;
+ uint64_t raw_id_;
+ uint32_t id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
+};
+
class TrackEventLegacy {
public:
static constexpr protos::pbzero::TrackEvent::Type PhaseToType(char phase) {
@@ -143,9 +280,57 @@
Args&&... args) PERFETTO_NO_INLINE {
AddDebugAnnotations(&ctx, std::forward<Args>(args)...);
SetTrackIfNeeded(&ctx, flags);
- if (PhaseToType(phase) == protos::pbzero::TrackEvent::TYPE_UNSPECIFIED) {
+ if (NeedLegacyFlags(phase, flags)) {
auto legacy_event = ctx.event()->set_legacy_event();
- legacy_event->set_phase(phase);
+ SetLegacyFlags(legacy_event, phase, flags);
+ }
+ }
+
+ template <typename ThreadIdType, typename... Args>
+ static void WriteLegacyEventWithIdAndTid(EventContext ctx,
+ char phase,
+ uint32_t flags,
+ const LegacyTraceId& id,
+ const ThreadIdType& thread_id,
+ Args&&... args) PERFETTO_NO_INLINE {
+ //
+ // Overrides to consider:
+ //
+ // 1. If we have an id, we need to write {unscoped,local,global}_id and/or
+ // bind_id.
+ // 2. If we have a thread id, we need to write track_uuid() or
+ // {pid,tid}_override. This happens in embedder code since the thread id
+ // is embedder-specified.
+ // 3. If we have a timestamp, we need to write a different timestamp in the
+ // trace packet itself and make sure TrackEvent won't write one
+ // internally. This is already done at the call site.
+ //
+ flags |= id.id_flags();
+ AddDebugAnnotations(&ctx, std::forward<Args>(args)...);
+ int32_t pid_override = 0;
+ int32_t tid_override = 0;
+ uint64_t track_uuid = 0;
+ if (legacy::ConvertThreadId(thread_id, &track_uuid, &pid_override,
+ &tid_override) &&
+ track_uuid) {
+ if (track_uuid != ThreadTrack::Current().uuid)
+ ctx.event()->set_track_uuid(track_uuid);
+ } else if (pid_override || tid_override) {
+ // Explicitly clear the track so the overrides below take effect.
+ ctx.event()->set_track_uuid(0);
+ } else {
+ // No pid/tid/track overrides => obey the flags instead.
+ SetTrackIfNeeded(&ctx, flags);
+ }
+ if (NeedLegacyFlags(phase, flags) || pid_override || tid_override) {
+ auto legacy_event = ctx.event()->set_legacy_event();
+ SetLegacyFlags(legacy_event, phase, flags);
+ if (id.id_flags())
+ id.Write(legacy_event, flags);
+ if (pid_override)
+ legacy_event->set_pid_override(pid_override);
+ if (tid_override)
+ legacy_event->set_tid_override(tid_override);
}
}
@@ -173,6 +358,7 @@
private:
static void SetTrackIfNeeded(EventContext* ctx, uint32_t flags) {
+ // Note: This avoids the need to set LegacyEvent::instant_event_scope.
auto scope = flags & TRACE_EVENT_FLAG_SCOPE_MASK;
switch (scope) {
case TRACE_EVENT_SCOPE_GLOBAL:
@@ -187,19 +373,54 @@
break;
}
}
+
+ static bool NeedLegacyFlags(char phase, uint32_t flags) {
+ if (PhaseToType(phase) == protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
+ return true;
+ // TODO(skyostil): Implement/deprecate:
+ // - TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP
+ // - TRACE_EVENT_FLAG_HAS_CONTEXT_ID
+ // - TRACE_EVENT_FLAG_HAS_PROCESS_ID
+ // - TRACE_EVENT_FLAG_TYPED_PROTO_ARGS
+ // - TRACE_EVENT_FLAG_JAVA_STRING_LITERALS
+ return flags &
+ (TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_HAS_LOCAL_ID |
+ TRACE_EVENT_FLAG_HAS_GLOBAL_ID | TRACE_EVENT_FLAG_ASYNC_TTS |
+ TRACE_EVENT_FLAG_BIND_TO_ENCLOSING | TRACE_EVENT_FLAG_FLOW_IN |
+ TRACE_EVENT_FLAG_FLOW_OUT);
+ }
+
+ static void SetLegacyFlags(
+ protos::pbzero::TrackEvent::LegacyEvent* legacy_event,
+ char phase,
+ uint32_t flags) {
+ if (PhaseToType(phase) == protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
+ legacy_event->set_phase(phase);
+ if (flags & TRACE_EVENT_FLAG_ASYNC_TTS)
+ legacy_event->set_use_async_tts(true);
+ if (flags & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+ legacy_event->set_bind_to_enclosing(true);
+
+ const auto kFlowIn = TRACE_EVENT_FLAG_FLOW_IN;
+ const auto kFlowOut = TRACE_EVENT_FLAG_FLOW_OUT;
+ const auto kFlowInOut = kFlowIn | kFlowOut;
+ if ((flags & kFlowInOut) == kFlowInOut) {
+ legacy_event->set_flow_direction(
+ protos::pbzero::TrackEvent::LegacyEvent::FLOW_INOUT);
+ } else if (flags & kFlowIn) {
+ legacy_event->set_flow_direction(
+ protos::pbzero::TrackEvent::LegacyEvent::FLOW_IN);
+ } else if (flags & kFlowOut) {
+ legacy_event->set_flow_direction(
+ protos::pbzero::TrackEvent::LegacyEvent::FLOW_OUT);
+ }
+ }
};
} // namespace internal
} // namespace perfetto
-// A black hole trace point where unsupported trace events are routed.
-#define PERFETTO_INTERNAL_EVENT_NOOP(cat, name, ...) \
- do { \
- if (false) { \
- ::perfetto::base::ignore_result(cat); \
- ::perfetto::base::ignore_result(name); \
- } \
- } while (false)
+#if PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
// Implementations for the INTERNAL_* adapter macros used by the trace points
// below.
@@ -220,19 +441,58 @@
TrackEventLegacy::AddDebugAnnotations(&ctx, ##__VA_ARGS__); \
})
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(...) \
- PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
-#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(...) \
- PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(...) \
- PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(...) \
- PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
-#define INTERNAL_TRACE_EVENT_METADATA_ADD(...) \
- PERFETTO_INTERNAL_EVENT_NOOP(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category, name, bind_id, \
+ flags, ...) \
+ PERFETTO_INTERNAL_SCOPED_TRACK_EVENT( \
+ category, name, [&](perfetto::EventContext ctx) { \
+ using ::perfetto::internal::TrackEventLegacy; \
+ ::perfetto::internal::LegacyTraceId trace_id{bind_id}; \
+ TrackEventLegacy::WriteLegacyEventWithIdAndTid( \
+ std::move(ctx), TRACE_EVENT_PHASE_BEGIN, flags, trace_id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, ##__VA_ARGS__); \
+ })
-#define INTERNAL_TRACE_TIME_TICKS_NOW() 0
-#define INTERNAL_TRACE_TIME_NOW() 0
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category, name, \
+ timestamp, flags, ...) \
+ PERFETTO_INTERNAL_TRACK_EVENT( \
+ category, name, \
+ ::perfetto::internal::TrackEventLegacy::PhaseToType(phase), \
+ ::perfetto::legacy::ConvertTimestampToTraceTimeNs(timestamp), \
+ [&](perfetto::EventContext ctx) { \
+ using ::perfetto::internal::TrackEventLegacy; \
+ TrackEventLegacy::WriteLegacyEvent(std::move(ctx), phase, flags, \
+ ##__VA_ARGS__); \
+ })
+
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category, name, id, thread_id, timestamp, flags, ...) \
+ PERFETTO_INTERNAL_TRACK_EVENT( \
+ category, name, \
+ ::perfetto::internal::TrackEventLegacy::PhaseToType(phase), \
+ ::perfetto::legacy::ConvertTimestampToTraceTimeNs(timestamp), \
+ [&](perfetto::EventContext ctx) { \
+ using ::perfetto::internal::TrackEventLegacy; \
+ ::perfetto::internal::LegacyTraceId trace_id{id}; \
+ TrackEventLegacy::WriteLegacyEventWithIdAndTid( \
+ std::move(ctx), phase, flags, trace_id, thread_id, ##__VA_ARGS__); \
+ })
+
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category, name, id, flags, \
+ ...) \
+ PERFETTO_INTERNAL_TRACK_EVENT( \
+ category, name, \
+ ::perfetto::internal::TrackEventLegacy::PhaseToType(phase), \
+ [&](perfetto::EventContext ctx) { \
+ using ::perfetto::internal::TrackEventLegacy; \
+ ::perfetto::internal::LegacyTraceId trace_id{id}; \
+ TrackEventLegacy::WriteLegacyEventWithIdAndTid( \
+ std::move(ctx), phase, flags, trace_id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, ##__VA_ARGS__); \
+ })
+
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category, name, ...) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_METADATA, category, name, \
+ TRACE_EVENT_FLAG_NONE)
// ----------------------------------------------------------------------------
// Legacy tracing common API (adapted from trace_event_common.h).
@@ -907,20 +1167,27 @@
*ret = false; \
} while (0)
-// Time queries.
-#define TRACE_TIME_TICKS_NOW() INTERNAL_TRACE_TIME_TICKS_NOW()
-#define TRACE_TIME_NOW() INTERNAL_TRACE_TIME_NOW()
-
// ----------------------------------------------------------------------------
// Legacy tracing API (adapted from trace_event.h).
// ----------------------------------------------------------------------------
// We can implement the following subset of the legacy tracing API without
-// involvement from the embedder. APIs such as TraceId and
-// TRACE_EVENT_API_ADD_TRACE_EVENT are still up to the embedder to define.
+// involvement from the embedder. APIs such as TRACE_EVENT_API_ADD_TRACE_EVENT
+// are still up to the embedder to define.
#define TRACE_STR_COPY(str) (str)
+#define TRACE_ID_WITH_SCOPE(scope, ...) \
+ ::perfetto::internal::LegacyTraceId::WithScope(scope, ##__VA_ARGS__)
+
+// Use this for ids that are unique across processes. This allows different
+// processes to use the same id to refer to the same event.
+#define TRACE_ID_GLOBAL(id) ::perfetto::internal::LegacyTraceId::GlobalId(id)
+
+// Use this for ids that are unique within a single process. This allows
+// different processes to use the same id to refer to different events.
+#define TRACE_ID_LOCAL(id) ::perfetto::internal::LegacyTraceId::LocalId(id)
+
// TODO(skyostil): Implement properly using CategoryRegistry.
#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category) \
[&] { \
diff --git a/src/base/unix_socket.cc b/src/base/unix_socket.cc
index f6db62a..3a92198 100644
--- a/src/base/unix_socket.cc
+++ b/src/base/unix_socket.cc
@@ -669,23 +669,16 @@
bool UnixSocket::Send(const void* msg,
size_t len,
const int* send_fds,
- size_t num_fds,
- BlockingMode blocking_mode) {
- // TODO(b/117139237): Non-blocking sends are broken because we do not
- // properly handle partial sends.
- PERFETTO_DCHECK(blocking_mode == BlockingMode::kBlocking);
-
+ size_t num_fds) {
if (state_ != State::kConnected) {
errno = last_error_ = ENOTCONN;
return false;
}
- if (blocking_mode == BlockingMode::kBlocking)
- sock_raw_.SetBlocking(true);
+ sock_raw_.SetBlocking(true);
const ssize_t sz = sock_raw_.Send(msg, len, send_fds, num_fds);
int saved_errno = errno;
- if (blocking_mode == BlockingMode::kBlocking)
- sock_raw_.SetBlocking(false);
+ sock_raw_.SetBlocking(false);
if (sz == static_cast<ssize_t>(len)) {
last_error_ = 0;
diff --git a/src/base/unix_socket_unittest.cc b/src/base/unix_socket_unittest.cc
index 138f631..2a43200 100644
--- a/src/base/unix_socket_unittest.cc
+++ b/src/base/unix_socket_unittest.cc
@@ -45,7 +45,6 @@
using ::testing::Mock;
constexpr char kSocketName[] = TEST_SOCK_NAME("unix_socket_unittest");
-constexpr auto kBlocking = UnixSocket::BlockingMode::kBlocking;
class MockEventListener : public UnixSocket::EventListener {
public:
@@ -123,7 +122,7 @@
auto cli_disconnected = task_runner_.CreateCheckpoint("cli_disconnected");
EXPECT_CALL(event_listener_, OnDisconnect(cli.get()))
.WillOnce(InvokeWithoutArgs(cli_disconnected));
- EXPECT_FALSE(cli->Send("whatever", kBlocking));
+ EXPECT_FALSE(cli->Send("whatever"));
task_runner_.RunUntilCheckpoint("cli_disconnected");
}
@@ -163,8 +162,8 @@
ASSERT_EQ("cli>srv", s->ReceiveString());
srv_did_recv();
}));
- ASSERT_TRUE(cli->Send("cli>srv", kBlocking));
- ASSERT_TRUE(srv_conn->Send("srv>cli", kBlocking));
+ ASSERT_TRUE(cli->Send("cli>srv"));
+ ASSERT_TRUE(srv_conn->Send("srv>cli"));
task_runner_.RunUntilCheckpoint("cli_did_recv");
task_runner_.RunUntilCheckpoint("srv_did_recv");
@@ -178,8 +177,8 @@
ASSERT_EQ("", cli->ReceiveString());
ASSERT_EQ(0u, srv_conn->Receive(&msg, sizeof(msg)));
ASSERT_EQ("", srv_conn->ReceiveString());
- ASSERT_FALSE(cli->Send("foo", kBlocking));
- ASSERT_FALSE(srv_conn->Send("bar", kBlocking));
+ ASSERT_FALSE(cli->Send("foo"));
+ ASSERT_FALSE(srv_conn->Send("bar"));
srv->Shutdown(true);
task_runner_.RunUntilCheckpoint("cli_disconnected");
task_runner_.RunUntilCheckpoint("srv_disconnected");
@@ -256,10 +255,10 @@
int buf_fd[2] = {null_fd.get(), zero_fd.get()};
- ASSERT_TRUE(cli->Send(cli_str, sizeof(cli_str), buf_fd,
- base::ArraySize(buf_fd), kBlocking));
+ ASSERT_TRUE(
+ cli->Send(cli_str, sizeof(cli_str), buf_fd, base::ArraySize(buf_fd)));
ASSERT_TRUE(srv_conn->Send(srv_str, sizeof(srv_str), buf_fd,
- base::ArraySize(buf_fd), kBlocking));
+ base::ArraySize(buf_fd)));
task_runner_.RunUntilCheckpoint("srv_did_recv");
task_runner_.RunUntilCheckpoint("cli_did_recv");
@@ -318,7 +317,7 @@
EXPECT_CALL(event_listener_, OnDataAvailable(s))
.WillOnce(Invoke([](UnixSocket* t) {
ASSERT_EQ("PING", t->ReceiveString());
- ASSERT_TRUE(t->Send("PONG", kBlocking));
+ ASSERT_TRUE(t->Send("PONG"));
}));
}));
@@ -328,7 +327,7 @@
EXPECT_CALL(event_listener_, OnConnect(cli[i].get(), true))
.WillOnce(Invoke([](UnixSocket* s, bool success) {
ASSERT_TRUE(success);
- ASSERT_TRUE(s->Send("PING", kBlocking));
+ ASSERT_TRUE(s->Send("PING"));
}));
auto checkpoint = task_runner_.CreateCheckpoint(std::to_string(i));
@@ -374,7 +373,7 @@
.WillOnce(Invoke(
[this, tmp_fd, checkpoint, mem](UnixSocket*, UnixSocket* new_conn) {
ASSERT_EQ(geteuid(), static_cast<uint32_t>(new_conn->peer_uid()));
- ASSERT_TRUE(new_conn->Send("txfd", 5, tmp_fd, kBlocking));
+ ASSERT_TRUE(new_conn->Send("txfd", 5, tmp_fd));
// Wait for the client to change this again.
EXPECT_CALL(event_listener_, OnDataAvailable(new_conn))
.WillOnce(Invoke([checkpoint, mem](UnixSocket* s) {
@@ -409,7 +408,7 @@
// Now change the shared memory and ping the other process.
memcpy(mem, "rock more", 10);
- ASSERT_TRUE(s->Send("change notify", kBlocking));
+ ASSERT_TRUE(s->Send("change notify"));
checkpoint();
}));
task_runner_.RunUntilCheckpoint("change_seen_by_client");
@@ -513,7 +512,7 @@
char buf[1024 * 32] = {};
tx_task_runner.PostTask([&cli, &buf, all_sent] {
for (size_t i = 0; i < kTotalBytes / sizeof(buf); i++)
- cli->Send(buf, sizeof(buf), -1 /*fd*/, kBlocking);
+ cli->Send(buf, sizeof(buf));
all_sent();
});
tx_task_runner.RunUntilCheckpoint("all_sent", kTimeoutMs);
@@ -562,7 +561,7 @@
static constexpr size_t kBufSize = 32 * 1024 * 1024;
std::unique_ptr<char[]> buf(new char[kBufSize]());
tx_task_runner.PostTask([&cli, &buf, send_done] {
- bool send_res = cli->Send(buf.get(), kBufSize, -1 /*fd*/, kBlocking);
+ bool send_res = cli->Send(buf.get(), kBufSize);
ASSERT_FALSE(send_res);
send_done();
});
@@ -754,7 +753,7 @@
task_runner_.RunUntilCheckpoint("connected");
srv->Shutdown(true);
- cli->Send("test", UnixSocket::BlockingMode::kBlocking);
+ cli->Send("test");
ASSERT_NE(peer, nullptr);
auto raw_sock = peer->ReleaseSocket();
@@ -794,7 +793,7 @@
.WillRepeatedly(Invoke([](UnixSocket* cli_sock) {
cli_sock->ReceiveString(); // Read connection EOF;
}));
- ASSERT_TRUE(s->Send("welcome", kBlocking));
+ ASSERT_TRUE(s->Send("welcome"));
}));
for (size_t i = 0; i < kNumClients; i++) {
diff --git a/src/ipc/client_impl.cc b/src/ipc/client_impl.cc
index f38ef6d..c569eb4 100644
--- a/src/ipc/client_impl.cc
+++ b/src/ipc/client_impl.cc
@@ -123,8 +123,7 @@
// socket buffer is full? We might want to either drop the request or throttle
// the send and PostTask the reply later? Right now we are making Send()
// blocking as a workaround. Propagate bakpressure to the caller instead.
- bool res = sock_->Send(buf.data(), buf.size(), fd,
- base::UnixSocket::BlockingMode::kBlocking);
+ bool res = sock_->Send(buf.data(), buf.size(), fd);
PERFETTO_CHECK(res || !sock_->is_connected());
return res;
}
diff --git a/src/ipc/client_impl_unittest.cc b/src/ipc/client_impl_unittest.cc
index 08ea09c..183fbf0 100644
--- a/src/ipc/client_impl_unittest.cc
+++ b/src/ipc/client_impl_unittest.cc
@@ -187,8 +187,7 @@
void Reply(const Frame& frame) {
auto buf = BufferedFrameDeserializer::Serialize(frame);
ASSERT_TRUE(client_sock->is_connected());
- EXPECT_TRUE(client_sock->Send(buf.data(), buf.size(), next_reply_fd,
- base::UnixSocket::BlockingMode::kBlocking));
+ EXPECT_TRUE(client_sock->Send(buf.data(), buf.size(), next_reply_fd));
next_reply_fd = -1;
}
diff --git a/src/ipc/host_impl.cc b/src/ipc/host_impl.cc
index 8635f22..2806a53 100644
--- a/src/ipc/host_impl.cc
+++ b/src/ipc/host_impl.cc
@@ -236,8 +236,7 @@
// socket buffer is full? We might want to either drop the request or throttle
// the send and PostTask the reply later? Right now we are making Send()
// blocking as a workaround. Propagate bakpressure to the caller instead.
- bool res = client->sock->Send(buf.data(), buf.size(), fd,
- base::UnixSocket::BlockingMode::kBlocking);
+ bool res = client->sock->Send(buf.data(), buf.size(), fd);
PERFETTO_CHECK(res || !client->sock->is_connected());
}
diff --git a/src/ipc/host_impl_unittest.cc b/src/ipc/host_impl_unittest.cc
index 1222bd7..d030a02 100644
--- a/src/ipc/host_impl_unittest.cc
+++ b/src/ipc/host_impl_unittest.cc
@@ -156,8 +156,7 @@
void SendFrame(const Frame& frame, int fd = -1) {
std::string buf = BufferedFrameDeserializer::Serialize(frame);
- ASSERT_TRUE(sock_->Send(buf.data(), buf.size(), fd,
- base::UnixSocket::BlockingMode::kBlocking));
+ ASSERT_TRUE(sock_->Send(buf.data(), buf.size(), fd));
}
BufferedFrameDeserializer frame_deserializer_;
diff --git a/src/profiling/memory/heapprofd_producer.cc b/src/profiling/memory/heapprofd_producer.cc
index e077f34..91db341 100644
--- a/src/profiling/memory/heapprofd_producer.cc
+++ b/src/profiling/memory/heapprofd_producer.cc
@@ -757,8 +757,7 @@
int raw_fd = pending_process.shmem.fd();
// TODO(fmayer): Full buffer could deadlock us here.
if (!self->Send(&data_source.client_configuration,
- sizeof(data_source.client_configuration), &raw_fd, 1,
- base::UnixSocket::BlockingMode::kBlocking)) {
+ sizeof(data_source.client_configuration), &raw_fd, 1)) {
// If Send fails, the socket will have been Shutdown, and the raw socket
// closed.
producer_->pending_processes_.erase(it);
diff --git a/src/trace_processor/importers/proto/track_event_parser.cc b/src/trace_processor/importers/proto/track_event_parser.cc
index e8faa89..271e6e7 100644
--- a/src/trace_processor/importers/proto/track_event_parser.cc
+++ b/src/trace_processor/importers/proto/track_event_parser.cc
@@ -501,22 +501,40 @@
}
}
}
- } else if ((!event.has_track_uuid() || !event.has_type()) &&
- (sequence_state->state()->pid_and_tid_valid() ||
- (legacy_event.has_pid_override() &&
- legacy_event.has_tid_override()))) {
- uint32_t pid = static_cast<uint32_t>(sequence_state->state()->pid());
- uint32_t tid = static_cast<uint32_t>(sequence_state->state()->tid());
- if (legacy_event.has_pid_override())
- pid = static_cast<uint32_t>(legacy_event.pid_override());
- if (legacy_event.has_tid_override())
- tid = static_cast<uint32_t>(legacy_event.tid_override());
-
- utid = procs->UpdateThread(tid, pid);
- upid = storage->thread_table().upid()[*utid];
- track_id = track_tracker->InternThreadTrack(*utid);
} else {
- track_id = track_tracker->GetOrCreateDefaultDescriptorTrack();
+ bool pid_tid_state_valid = sequence_state->state()->pid_and_tid_valid();
+
+ // We have a 0-value |track_uuid|. Nevertheless, we should only fall back if
+ // we have either no |track_uuid| specified at all or |track_uuid| was set
+ // explicitly to 0 (e.g. to override a default track_uuid) and we have a
+ // legacy phase. Events with real phases should use |track_uuid| to specify
+ // a different track (or use the pid/tid_override fields).
+ bool fallback_to_legacy_pid_tid_tracks =
+ (!event.has_track_uuid() || !event.has_type()) && pid_tid_state_valid;
+
+ // Always allow fallback if we have a process override.
+ fallback_to_legacy_pid_tid_tracks |= legacy_event.has_pid_override();
+
+ // A thread override requires a valid pid.
+ fallback_to_legacy_pid_tid_tracks |=
+ legacy_event.has_tid_override() && pid_tid_state_valid;
+
+ if (fallback_to_legacy_pid_tid_tracks) {
+ uint32_t pid = static_cast<uint32_t>(sequence_state->state()->pid());
+ uint32_t tid = static_cast<uint32_t>(sequence_state->state()->tid());
+ if (legacy_event.has_pid_override()) {
+ pid = static_cast<uint32_t>(legacy_event.pid_override());
+ tid = static_cast<uint32_t>(-1);
+ }
+ if (legacy_event.has_tid_override())
+ tid = static_cast<uint32_t>(legacy_event.tid_override());
+
+ utid = procs->UpdateThread(tid, pid);
+ upid = storage->thread_table().upid()[*utid];
+ track_id = track_tracker->InternThreadTrack(*utid);
+ } else {
+ track_id = track_tracker->GetOrCreateDefaultDescriptorTrack();
+ }
}
// TODO(eseckler): Replace phase with type and remove handling of
@@ -586,6 +604,7 @@
track_id = context_->track_tracker
->GetOrCreateLegacyChromeGlobalInstantTrack();
legacy_passthrough_utid = utid;
+ utid = base::nullopt;
break;
case LegacyEvent::SCOPE_PROCESS:
if (!upid) {
@@ -599,6 +618,7 @@
context_->track_tracker->InternLegacyChromeProcessInstantTrack(
*upid);
legacy_passthrough_utid = utid;
+ utid = base::nullopt;
break;
}
break;
@@ -763,36 +783,15 @@
int64_t duration_ns = 0;
int64_t tidelta = 0;
- switch (legacy_event.instant_event_scope()) {
- case LegacyEvent::SCOPE_UNSPECIFIED:
- case LegacyEvent::SCOPE_THREAD: {
- auto opt_slice_id = slice_tracker->Scoped(
- ts, track_id, category_id, name_id, duration_ns, args_callback);
- if (opt_slice_id.has_value()) {
- auto* thread_slices = storage->mutable_thread_slices();
- PERFETTO_DCHECK(!thread_slices->slice_count() ||
- thread_slices->slice_ids().back() <
- opt_slice_id.value());
- thread_slices->AddThreadSlice(opt_slice_id.value(), tts,
- duration_ns, ticount, tidelta);
- }
- break;
- }
- case LegacyEvent::SCOPE_GLOBAL: {
- slice_tracker->Scoped(ts, track_id, category_id, name_id, duration_ns,
- args_callback);
- break;
- }
- case LegacyEvent::SCOPE_PROCESS: {
- slice_tracker->Scoped(ts, track_id, category_id, name_id, duration_ns,
- args_callback);
- break;
- }
- default: {
- PERFETTO_FATAL("Unknown instant event scope: %u",
- legacy_event.instant_event_scope());
- break;
- }
+ auto opt_slice_id = slice_tracker->Scoped(
+ ts, track_id, category_id, name_id, duration_ns, args_callback);
+ if (utid && opt_slice_id.has_value()) {
+ auto* thread_slices = storage->mutable_thread_slices();
+ PERFETTO_DCHECK(!thread_slices->slice_count() ||
+ thread_slices->slice_ids().back() <
+ opt_slice_id.value());
+ thread_slices->AddThreadSlice(opt_slice_id.value(), tts, duration_ns,
+ ticount, tidelta);
}
break;
}
diff --git a/src/trace_processor/rpc/httpd.cc b/src/trace_processor/rpc/httpd.cc
index 73c1bca..6ab492a 100644
--- a/src/trace_processor/rpc/httpd.cc
+++ b/src/trace_processor/rpc/httpd.cc
@@ -40,7 +40,6 @@
namespace {
constexpr char kBindAddr[] = "127.0.0.1:9001";
-constexpr auto kBlocking = base::UnixSocket::BlockingMode::kBlocking;
// 32 MiB payload + 128K for HTTP headers.
constexpr size_t kMaxRequestSize = (32 * 1024 + 128) * 1024;
@@ -111,9 +110,9 @@
Append(response, "Content-Length: ");
Append(response, std::to_string(body_len));
Append(response, "\r\n\r\n"); // End-of-headers marker.
- sock->Send(response.data(), response.size(), /*fd=*/-1, kBlocking);
+ sock->Send(response.data(), response.size());
if (body_len)
- sock->Send(body, body_len, /*fd=*/-1, kBlocking);
+ sock->Send(body, body_len);
}
void ShutdownBadRequest(base::UnixSocket* sock, const char* reason) {
diff --git a/src/traced/probes/probes_producer.cc b/src/traced/probes/probes_producer.cc
index 90238b5..036fb0c 100644
--- a/src/traced/probes/probes_producer.cc
+++ b/src/traced/probes/probes_producer.cc
@@ -58,6 +58,9 @@
// Should be larger than FtraceController::kControllerFlushTimeoutMs.
constexpr uint32_t kFlushTimeoutMs = 1000;
+constexpr size_t kTracingSharedMemSizeHintBytes = 1024 * 1024;
+constexpr size_t kTracingSharedMemPageSizeHintBytes = 32 * 1024;
+
constexpr char kFtraceSourceName[] = "linux.ftrace";
constexpr char kProcessStatsSourceName[] = "linux.process_stats";
constexpr char kInodeMapSourceName[] = "linux.inode_file_map";
@@ -529,7 +532,9 @@
PERFETTO_DCHECK(state_ == kNotConnected);
state_ = kConnecting;
endpoint_ = ProducerIPCClient::Connect(
- socket_name_, this, "perfetto.traced_probes", task_runner_);
+ socket_name_, this, "perfetto.traced_probes", task_runner_,
+ TracingService::ProducerSMBScrapingMode::kDisabled,
+ kTracingSharedMemSizeHintBytes, kTracingSharedMemPageSizeHintBytes);
}
void ProbesProducer::IncreaseConnectionBackoff() {
diff --git a/src/tracing/BUILD.gn b/src/tracing/BUILD.gn
index cb8f4c5..fb24c7b 100644
--- a/src/tracing/BUILD.gn
+++ b/src/tracing/BUILD.gn
@@ -105,6 +105,7 @@
"tracing.cc",
"track.cc",
"track_event_category_registry.cc",
+ "track_event_legacy.cc",
"virtual_destructors.cc",
]
assert_no_deps = [ "core:service" ]
diff --git a/src/tracing/internal/track_event_internal.cc b/src/tracing/internal/track_event_internal.cc
index 4c0c86b..895d14d 100644
--- a/src/tracing/internal/track_event_internal.cc
+++ b/src/tracing/internal/track_event_internal.cc
@@ -175,8 +175,8 @@
uint32_t seq_flags) {
auto packet = trace_writer->NewTracePacket();
packet->set_timestamp(timestamp);
- // TODO(skyostil): Stop emitting this for every event once the trace processor
- // understands trace packet defaults.
+ // TODO(skyostil): Stop emitting this for every event once the trace
+ // processor understands trace packet defaults.
if (GetClockType() != protos::pbzero::ClockSnapshot::Clock::BOOTTIME)
packet->set_timestamp_clock_id(GetClockType());
packet->set_sequence_flags(seq_flags);
@@ -189,10 +189,10 @@
TrackEventIncrementalState* incr_state,
const char* category,
const char* name,
- perfetto::protos::pbzero::TrackEvent::Type type) {
+ perfetto::protos::pbzero::TrackEvent::Type type,
+ uint64_t timestamp) {
PERFETTO_DCHECK(category);
PERFETTO_DCHECK(g_main_thread);
- auto timestamp = GetTimeNs();
if (incr_state->was_cleared) {
incr_state->was_cleared = false;
diff --git a/src/tracing/ipc/BUILD.gn b/src/tracing/ipc/BUILD.gn
index 0f2bcb6..93c97a6 100644
--- a/src/tracing/ipc/BUILD.gn
+++ b/src/tracing/ipc/BUILD.gn
@@ -28,6 +28,8 @@
]
sources = [
"default_socket.cc",
+ "memfd.cc",
+ "memfd.h",
"posix_shared_memory.cc",
"posix_shared_memory.h",
]
diff --git a/src/tracing/ipc/memfd.cc b/src/tracing/ipc/memfd.cc
new file mode 100644
index 0000000..64025bf
--- /dev/null
+++ b/src/tracing/ipc/memfd.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/tracing/ipc/memfd.h"
+
+#include <errno.h>
+
+#define PERFETTO_MEMFD_ENABLED() \
+ PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+ PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX)
+
+#if PERFETTO_MEMFD_ENABLED()
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+// Some Android build bots use a sysroot that doesn't support memfd when
+// compiling for the host, so we redefine it if necessary.
+#if !defined(__NR_memfd_create)
+#if defined(__x86_64__)
+#define __NR_memfd_create 319
+#elif defined(__i386__)
+#define __NR_memfd_create 356
+#elif defined(__aarch64__)
+#define __NR_memfd_create 279
+#elif defined(__arm__)
+#define __NR_memfd_create 385
+#else
+#error "unsupported sysroot without memfd support"
+#endif
+#endif // !defined(__NR_memfd_create)
+
+namespace perfetto {
+bool HasMemfdSupport() {
+ static bool kSupportsMemfd = [] {
+ // Check kernel version supports memfd_create(). Some older kernels segfault
+ // executing memfd_create() rather than returning ENOSYS (b/116769556).
+ static constexpr int kRequiredMajor = 3;
+ static constexpr int kRequiredMinor = 17;
+ struct utsname uts;
+ int major, minor;
+ if (uname(&uts) == 0 && strcmp(uts.sysname, "Linux") == 0 &&
+ sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
+ ((major < kRequiredMajor ||
+ (major == kRequiredMajor && minor < kRequiredMinor)))) {
+ return false;
+ }
+
+ base::ScopedFile fd;
+ fd.reset(static_cast<int>(syscall(__NR_memfd_create, "perfetto_shmem",
+ MFD_CLOEXEC | MFD_ALLOW_SEALING)));
+ return !!fd;
+ }();
+ return kSupportsMemfd;
+}
+
+base::ScopedFile CreateMemfd(const char* name, unsigned int flags) {
+ if (!HasMemfdSupport()) {
+ errno = ENOSYS;
+ return base::ScopedFile();
+ }
+ return base::ScopedFile(
+ static_cast<int>(syscall(__NR_memfd_create, name, flags)));
+}
+} // namespace perfetto
+
+#else // PERFETTO_MEMFD_ENABLED()
+
+namespace perfetto {
+bool HasMemfdSupport() {
+ return false;
+}
+base::ScopedFile CreateMemfd(const char*, unsigned int) {
+ errno = ENOSYS;
+ return base::ScopedFile();
+}
+} // namespace perfetto
+
+#endif // PERFETTO_MEMFD_ENABLED()
diff --git a/src/tracing/ipc/memfd.h b/src/tracing/ipc/memfd.h
new file mode 100644
index 0000000..8cf4b2a
--- /dev/null
+++ b/src/tracing/ipc/memfd.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_MEMFD_H_
+#define SRC_TRACING_IPC_MEMFD_H_
+
+#include "perfetto/base/build_config.h"
+
+#include "perfetto/ext/base/scoped_file.h"
+
+// Some Android build bots use a sysroot that doesn't support memfd when
+// compiling for the host, so we define the flags we need ourselves.
+
+// from memfd.h
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 0x0001U
+#define MFD_ALLOW_SEALING 0x0002U
+#endif
+
+// from fcntl.h
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS 1033
+#define F_GET_SEALS 1034
+#define F_SEAL_SEAL 0x0001
+#define F_SEAL_SHRINK 0x0002
+#define F_SEAL_GROW 0x0004
+#define F_SEAL_WRITE 0x0008
+#endif
+
+namespace perfetto {
+
+// Whether the operating system supports memfd.
+bool HasMemfdSupport();
+
+// Call memfd_create(2) if available on the platform and return the fd. This
+// also makes a kernel version check for safety on older kernels (b/116769556).
+// Returns an invalid ScopedFile on failure.
+base::ScopedFile CreateMemfd(const char* name, unsigned int flags);
+
+} // namespace perfetto
+
+#endif // SRC_TRACING_IPC_MEMFD_H_
diff --git a/src/tracing/ipc/posix_shared_memory.cc b/src/tracing/ipc/posix_shared_memory.cc
index d7f0904..4d5191d 100644
--- a/src/tracing/ipc/posix_shared_memory.cc
+++ b/src/tracing/ipc/posix_shared_memory.cc
@@ -27,50 +27,55 @@
#include <memory>
#include <utility>
-#include "perfetto/base/build_config.h"
+#include "perfetto/base/compiler.h"
#include "perfetto/base/logging.h"
#include "perfetto/ext/base/temp_file.h"
-
-#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
-#include <linux/memfd.h>
-#include <sys/syscall.h>
-#endif
+#include "src/tracing/ipc/memfd.h"
namespace perfetto {
+namespace {
+int kFileSeals = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL;
+} // namespace
+
// static
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::Create(size_t size) {
- base::ScopedFile fd;
-#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
- bool is_memfd = false;
- fd.reset(static_cast<int>(syscall(__NR_memfd_create, "perfetto_shmem",
- MFD_CLOEXEC | MFD_ALLOW_SEALING)));
- is_memfd = !!fd;
+ base::ScopedFile fd =
+ CreateMemfd("perfetto_shmem", MFD_CLOEXEC | MFD_ALLOW_SEALING);
+ bool is_memfd = !!fd;
if (!fd) {
// TODO: if this fails on Android we should fall back on ashmem.
PERFETTO_DPLOG("memfd_create() failed");
- }
-#endif
-
- if (!fd)
fd = base::TempFile::CreateUnlinked().ReleaseFD();
+ }
PERFETTO_CHECK(fd);
int res = ftruncate(fd.get(), static_cast<off_t>(size));
PERFETTO_CHECK(res == 0);
-#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+
if (is_memfd) {
- res = fcntl(*fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL);
+ // When memfd is supported, file seals should be, too.
+ res = fcntl(*fd, F_ADD_SEALS, kFileSeals);
PERFETTO_DCHECK(res == 0);
}
-#endif
+
return MapFD(std::move(fd), size);
}
// static
std::unique_ptr<PosixSharedMemory> PosixSharedMemory::AttachToFd(
- base::ScopedFile fd) {
+ base::ScopedFile fd,
+ bool require_seals_if_supported) {
+ if (require_seals_if_supported && HasMemfdSupport()) {
+ // If the system supports memfd, we require a sealed memfd.
+ int res = fcntl(*fd, F_GET_SEALS);
+ if (res == -1 || (res & kFileSeals) != kFileSeals) {
+ PERFETTO_PLOG("Couldn't verify file seals on shmem FD");
+ return nullptr;
+ }
+ }
+
struct stat stat_buf = {};
int res = fstat(fd.get(), &stat_buf);
PERFETTO_CHECK(res == 0 && stat_buf.st_size > 0);
diff --git a/src/tracing/ipc/posix_shared_memory.h b/src/tracing/ipc/posix_shared_memory.h
index 4bb3baf..0a0c39e 100644
--- a/src/tracing/ipc/posix_shared_memory.h
+++ b/src/tracing/ipc/posix_shared_memory.h
@@ -21,6 +21,7 @@
#include <memory>
+#include "perfetto/base/build_config.h"
#include "perfetto/ext/base/scoped_file.h"
#include "perfetto/ext/tracing/core/shared_memory.h"
@@ -35,11 +36,17 @@
std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
};
- // Create a brand new SHM region (the service uses this).
+ // Create a brand new SHM region.
static std::unique_ptr<PosixSharedMemory> Create(size_t size);
- // Mmaps a file descriptor to an existing SHM region (the producer uses this).
- static std::unique_ptr<PosixSharedMemory> AttachToFd(base::ScopedFile);
+ // Mmaps a file descriptor to an existing SHM region. If
+ // |require_seals_if_supported| is true and the system supports
+ // memfd_create(), the FD is required to be a sealed memfd with F_SEAL_SEAL,
+ // F_SEAL_GROW, and F_SEAL_SHRINK seals set (otherwise, nullptr is returned).
+ // May also return nullptr if mapping fails for another reason (e.g. OOM).
+ static std::unique_ptr<PosixSharedMemory> AttachToFd(
+ base::ScopedFile,
+ bool require_seals_if_supported = true);
~PosixSharedMemory() override;
diff --git a/src/tracing/ipc/posix_shared_memory_unittest.cc b/src/tracing/ipc/posix_shared_memory_unittest.cc
index 1156556..e5589d0 100644
--- a/src/tracing/ipc/posix_shared_memory_unittest.cc
+++ b/src/tracing/ipc/posix_shared_memory_unittest.cc
@@ -30,6 +30,7 @@
#include "perfetto/ext/base/utils.h"
#include "src/base/test/test_task_runner.h"
#include "src/base/test/vm_test_utils.h"
+#include "src/tracing/ipc/memfd.h"
#include "test/gtest_and_gmock.h"
namespace perfetto {
@@ -43,6 +44,7 @@
PosixSharedMemory::Factory factory;
std::unique_ptr<SharedMemory> shm =
factory.CreateSharedMemory(base::kPageSize);
+ ASSERT_NE(shm.get(), nullptr);
void* const shm_start = shm->start();
const size_t shm_size = shm->size();
ASSERT_NE(nullptr, shm_start);
@@ -58,6 +60,7 @@
TEST(PosixSharedMemoryTest, DestructorClosesFD) {
std::unique_ptr<PosixSharedMemory> shm =
PosixSharedMemory::Create(base::kPageSize);
+ ASSERT_NE(shm.get(), nullptr);
int fd = shm->fd();
ASSERT_GE(fd, 0);
ASSERT_EQ(static_cast<off_t>(base::kPageSize), lseek(fd, 0, SEEK_END));
@@ -66,14 +69,15 @@
ASSERT_TRUE(IsFileDescriptorClosed(fd));
}
-TEST(PosixSharedMemoryTest, AttachToFd) {
+TEST(PosixSharedMemoryTest, AttachToFdWithoutSeals) {
base::TempFile tmp_file = base::TempFile::CreateUnlinked();
const int fd_num = tmp_file.fd();
ASSERT_EQ(0, ftruncate(fd_num, base::kPageSize));
ASSERT_EQ(7, base::WriteAll(fd_num, "foobar", 7));
- std::unique_ptr<PosixSharedMemory> shm =
- PosixSharedMemory::AttachToFd(tmp_file.ReleaseFD());
+ std::unique_ptr<PosixSharedMemory> shm = PosixSharedMemory::AttachToFd(
+ tmp_file.ReleaseFD(), /*require_seals_if_supported=*/false);
+ ASSERT_NE(shm.get(), nullptr);
void* const shm_start = shm->start();
const size_t shm_size = shm->size();
ASSERT_NE(nullptr, shm_start);
@@ -87,5 +91,53 @@
ASSERT_FALSE(base::vm_test_utils::IsMapped(shm_start, shm_size));
}
+TEST(PosixSharedMemoryTest, AttachToFdRequiresSeals) {
+ base::TempFile tmp_file = base::TempFile::CreateUnlinked();
+ const int fd_num = tmp_file.fd();
+ ASSERT_EQ(0, ftruncate(fd_num, base::kPageSize));
+
+ std::unique_ptr<PosixSharedMemory> shm =
+ PosixSharedMemory::AttachToFd(tmp_file.ReleaseFD());
+
+ if (HasMemfdSupport()) {
+ EXPECT_EQ(shm.get(), nullptr);
+ } else {
+ ASSERT_NE(shm.get(), nullptr);
+ EXPECT_NE(shm->start(), nullptr);
+ }
+}
+
+TEST(PosixSharedMemoryTest, CreateAndMap) {
+ std::unique_ptr<PosixSharedMemory> shm =
+ PosixSharedMemory::Create(base::kPageSize);
+ void* const shm_start = shm->start();
+ const size_t shm_size = shm->size();
+ ASSERT_NE(shm_start, nullptr);
+ ASSERT_EQ(shm_size, base::kPageSize);
+
+ memcpy(shm_start, "test", 5);
+ ASSERT_TRUE(base::vm_test_utils::IsMapped(shm_start, shm_size));
+
+ base::ScopedFile shm_fd2(dup(shm->fd()));
+ std::unique_ptr<PosixSharedMemory> shm2 =
+ PosixSharedMemory::AttachToFd(std::move(shm_fd2));
+ ASSERT_NE(shm2.get(), nullptr);
+ void* const shm2_start = shm2->start();
+ const size_t shm2_size = shm2->size();
+ ASSERT_NE(shm2_start, nullptr);
+ ASSERT_EQ(shm2_size, shm_size);
+
+ ASSERT_EQ(0, memcmp("test", shm2->start(), 5));
+ ASSERT_TRUE(base::vm_test_utils::IsMapped(shm2_start, shm2_size));
+
+ shm2.reset();
+ ASSERT_FALSE(base::vm_test_utils::IsMapped(shm2_start, shm2_size));
+ ASSERT_TRUE(base::vm_test_utils::IsMapped(shm_start, shm_size));
+
+ shm.reset();
+ ASSERT_FALSE(base::vm_test_utils::IsMapped(shm2_start, shm2_size));
+ ASSERT_FALSE(base::vm_test_utils::IsMapped(shm_start, shm_size));
+}
+
} // namespace
} // namespace perfetto
diff --git a/src/tracing/ipc/producer/producer_ipc_client_impl.cc b/src/tracing/ipc/producer/producer_ipc_client_impl.cc
index f06801a..e828a12 100644
--- a/src/tracing/ipc/producer/producer_ipc_client_impl.cc
+++ b/src/tracing/ipc/producer/producer_ipc_client_impl.cc
@@ -186,7 +186,9 @@
PERFETTO_CHECK(shmem_fd);
// TODO(primiano): handle mmap failure in case of OOM.
- shared_memory_ = PosixSharedMemory::AttachToFd(std::move(shmem_fd));
+ shared_memory_ =
+ PosixSharedMemory::AttachToFd(std::move(shmem_fd),
+ /*require_seals_if_supported=*/false);
shared_buffer_page_size_kb_ =
cmd.setup_tracing().shared_buffer_page_size_kb();
shared_memory_arbiter_ = SharedMemoryArbiter::CreateInstance(
diff --git a/src/tracing/test/api_integrationtest.cc b/src/tracing/test/api_integrationtest.cc
index b633582..03521e5 100644
--- a/src/tracing/test/api_integrationtest.cc
+++ b/src/tracing/test/api_integrationtest.cc
@@ -73,7 +73,10 @@
PERFETTO_DEFINE_CATEGORIES(PERFETTO_CATEGORY(test),
PERFETTO_CATEGORY(foo),
PERFETTO_CATEGORY(bar),
- PERFETTO_CATEGORY(cat));
+ PERFETTO_CATEGORY(cat),
+ // TODO(skyostil): Figure out how to represent
+ // disabled-by-default categories
+ {TRACE_DISABLED_BY_DEFAULT("cat")});
PERFETTO_TRACK_EVENT_STATIC_STORAGE();
// For testing interning of complex objects.
@@ -93,6 +96,52 @@
};
} // namespace std
+// Represents an opaque (from Perfetto's point of view) thread identifier (e.g.,
+// base::PlatformThreadId in Chromium).
+struct MyThreadId {
+ MyThreadId(int pid_, int tid_) : pid(pid_), tid(tid_) {}
+
+ const int pid = 0;
+ const int tid = 0;
+};
+
+// Represents an opaque timestamp (e.g., base::TimeTicks in Chromium).
+class MyTimestamp {
+ public:
+ explicit MyTimestamp(uint64_t ts_) : ts(ts_) {}
+
+ const uint64_t ts;
+};
+
+namespace perfetto {
+namespace legacy {
+
+template <>
+bool ConvertThreadId(const MyThreadId& thread,
+ uint64_t* track_uuid_out,
+ int32_t* pid_override_out,
+ int32_t* tid_override_out) {
+ if (!thread.pid && !thread.tid)
+ return false;
+ if (!thread.pid) {
+ // Thread in current process.
+ *track_uuid_out = perfetto::ThreadTrack::ForThread(thread.tid).uuid;
+ } else {
+ // Thread in another process.
+ *pid_override_out = thread.pid;
+ *tid_override_out = thread.tid;
+ }
+ return true;
+}
+
+template <>
+uint64_t ConvertTimestampToTraceTimeNs(const MyTimestamp& timestamp) {
+ return timestamp.ts;
+}
+
+} // namespace legacy
+} // namespace perfetto
+
namespace {
using ::testing::_;
@@ -420,6 +469,32 @@
default:
ADD_FAILURE();
}
+ if (track_event.has_legacy_event()) {
+ auto legacy_event = track_event.legacy_event();
+ std::stringstream id;
+ if (legacy_event.has_unscoped_id()) {
+ id << "(unscoped_id=" << legacy_event.unscoped_id() << ")";
+ } else if (legacy_event.has_local_id()) {
+ id << "(local_id=" << legacy_event.local_id() << ")";
+ } else if (legacy_event.has_global_id()) {
+ id << "(global_id=" << legacy_event.global_id() << ")";
+ } else if (legacy_event.has_bind_id()) {
+ id << "(bind_id=" << legacy_event.bind_id() << ")";
+ }
+ if (legacy_event.has_id_scope())
+ id << "(id_scope=\"" << legacy_event.id_scope() << "\")";
+ if (legacy_event.use_async_tts())
+ id << "(use_async_tts)";
+ if (legacy_event.bind_to_enclosing())
+ id << "(bind_to_enclosing)";
+ if (legacy_event.has_flow_direction())
+ id << "(flow_direction=" << legacy_event.flow_direction() << ")";
+ if (legacy_event.has_pid_override())
+ id << "(pid_override=" << legacy_event.pid_override() << ")";
+ if (legacy_event.has_tid_override())
+ id << "(tid_override=" << legacy_event.tid_override() << ")";
+ slice += id.str();
+ }
if (!track_event.category_iids().empty())
slice += ":" + categories[track_event.category_iids()[0]];
if (track_event.has_name_iid())
@@ -1958,9 +2033,6 @@
}
TEST_F(PerfettoApiTest, LegacyTraceEvents) {
- // TODO(skyostil): For now we just test that all variants of legacy trace
- // points compile. Test actual functionality when implemented.
-
// Setup the trace config.
perfetto::TraceConfig cfg;
cfg.set_duration_ms(500);
@@ -1986,11 +2058,13 @@
// Event with timestamp.
TRACE_EVENT_INSTANT_WITH_TIMESTAMP0("cat", "LegacyInstantEvent",
- TRACE_EVENT_SCOPE_GLOBAL, 123456789ul);
+ TRACE_EVENT_SCOPE_GLOBAL,
+ MyTimestamp{123456789ul});
// Event with id, thread id and timestamp (and dynamic name).
TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
- "cat", std::string("LegacyWithIdTidAndTimestamp").c_str(), 1, 2, 3);
+ "cat", std::string("LegacyWithIdTidAndTimestamp").c_str(), 1,
+ MyThreadId(123, 456), MyTimestamp{3});
// Event with id.
TRACE_COUNTER1("cat", "LegacyCounter", 1234);
@@ -2004,10 +2078,18 @@
auto slices = ReadSlicesFromTrace(tracing_session->get());
EXPECT_THAT(
slices,
- ElementsAre("I:cat.LegacyEvent", "B:cat.LegacyEvent(arg=(int)123)",
- "E.LegacyEvent(arg=(string)string,arg2=(double)0.123)",
- "B:cat.ScopedLegacyEvent", "E",
- "Legacy_C:cat.LegacyCounter(value=(int)1234)"));
+ ElementsAre(
+ "I:cat.LegacyEvent", "B:cat.LegacyEvent(arg=(int)123)",
+ "E.LegacyEvent(arg=(string)string,arg2=(double)0.123)",
+ "B:cat.ScopedLegacyEvent", "E",
+ "B(bind_id=3671771902)(flow_direction=1):disabled-by-default-cat."
+ "LegacyFlowEvent",
+ "I:cat.LegacyInstantEvent",
+ "Legacy_S(unscoped_id=1)(pid_override=123)(tid_override=456):cat."
+ "LegacyWithIdTidAndTimestamp",
+ "Legacy_C:cat.LegacyCounter(value=(int)1234)",
+ "Legacy_C(unscoped_id=1234):cat.LegacyCounterWithId(value=(int)9000)",
+ "Legacy_M:cat.LegacyMetadata"));
}
TEST_F(PerfettoApiTest, LegacyTraceEventsWithCustomAnnotation) {
@@ -2068,6 +2150,77 @@
ElementsAre("B:cat.LegacyEvent(arg=(json){\"key\": 123})"));
}
+TEST_F(PerfettoApiTest, LegacyTraceEventsWithId) {
+ // Setup the trace config.
+ perfetto::TraceConfig cfg;
+ cfg.set_duration_ms(500);
+ cfg.add_buffers()->set_size_kb(1024);
+ auto* ds_cfg = cfg.add_data_sources()->mutable_config();
+ ds_cfg->set_name("track_event");
+
+ auto* tracing_session = NewTrace(cfg);
+ tracing_session->get()->StartBlocking();
+
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "UnscopedId", 0x1000);
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "LocalId", TRACE_ID_LOCAL(0x2000));
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "GlobalId", TRACE_ID_GLOBAL(0x3000));
+ TRACE_EVENT_ASYNC_BEGIN0(
+ "cat", "WithScope",
+ TRACE_ID_WITH_SCOPE("scope string", TRACE_ID_GLOBAL(0x4000)));
+
+ perfetto::TrackEvent::Flush();
+ tracing_session->get()->StopBlocking();
+ auto slices = ReadSlicesFromTrace(tracing_session->get());
+ EXPECT_THAT(slices, ElementsAre("Legacy_S(unscoped_id=4096):cat.UnscopedId",
+ "Legacy_S(local_id=8192):cat.LocalId",
+ "Legacy_S(global_id=12288):cat.GlobalId",
+ "Legacy_S(global_id=16384)(id_scope=\"scope "
+ "string\"):cat.WithScope"));
+}
+
+TEST_F(PerfettoApiTest, LegacyTraceEventsWithFlow) {
+ // Setup the trace config.
+ perfetto::TraceConfig cfg;
+ cfg.set_duration_ms(500);
+ cfg.add_buffers()->set_size_kb(1024);
+ auto* ds_cfg = cfg.add_data_sources()->mutable_config();
+ ds_cfg->set_name("track_event");
+
+ auto* tracing_session = NewTrace(cfg);
+ tracing_session->get()->StartBlocking();
+
+ const uint64_t flow_id = 1234;
+ {
+ TRACE_EVENT_WITH_FLOW1("cat", "LatencyInfo.Flow", TRACE_ID_GLOBAL(flow_id),
+ TRACE_EVENT_FLAG_FLOW_OUT, "step", "Begin");
+ }
+
+ {
+ TRACE_EVENT_WITH_FLOW2("cat", "LatencyInfo.Flow", TRACE_ID_GLOBAL(flow_id),
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
+ "step", "Middle", "value", false);
+ }
+
+ {
+ TRACE_EVENT_WITH_FLOW1("cat", "LatencyInfo.Flow", TRACE_ID_GLOBAL(flow_id),
+ TRACE_EVENT_FLAG_FLOW_IN, "step", "End");
+ }
+
+ perfetto::TrackEvent::Flush();
+ tracing_session->get()->StopBlocking();
+ auto slices = ReadSlicesFromTrace(tracing_session->get());
+ EXPECT_THAT(slices,
+ ElementsAre("B(bind_id=1234)(flow_direction=2):cat.LatencyInfo."
+ "Flow(step=(string)Begin)",
+ "E",
+ "B(bind_id=1234)(flow_direction=3):cat.LatencyInfo."
+ "Flow(step=(string)Middle,value=(bool)0)",
+ "E",
+ "B(bind_id=1234)(flow_direction=1):cat.LatencyInfo."
+ "Flow(step=(string)End)",
+ "E"));
+}
+
} // namespace
PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(MockDataSource);
diff --git a/src/tracing/track_event_legacy.cc b/src/tracing/track_event_legacy.cc
new file mode 100644
index 0000000..1628a99
--- /dev/null
+++ b/src/tracing/track_event_legacy.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "perfetto/tracing/track_event_legacy.h"
+
+#include "perfetto/tracing/track.h"
+
+namespace perfetto {
+namespace legacy {
+
+template <>
+bool ConvertThreadId(const PerfettoLegacyCurrentThreadId&,
+ uint64_t*,
+ int32_t*,
+ int32_t*) {
+  // No need to override anything for events on the current thread.
+ return false;
+}
+
+} // namespace legacy
+
+namespace internal {
+
+void LegacyTraceId::Write(protos::pbzero::TrackEvent::LegacyEvent* event,
+ uint32_t event_flags) const {
+ // Legacy flow events always use bind_id.
+ if (event_flags & (TRACE_EVENT_FLAG_FLOW_OUT | TRACE_EVENT_FLAG_FLOW_IN)) {
+ // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
+ // avoid collisions.
+ if (id_flags_ & TRACE_EVENT_FLAG_HAS_LOCAL_ID) {
+ event->set_bind_id(raw_id_ ^ ProcessTrack::Current().uuid);
+ } else {
+ event->set_bind_id(raw_id_);
+ }
+ return;
+ }
+
+ uint32_t scope_flags =
+ id_flags_ & (TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_HAS_LOCAL_ID |
+ TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
+ switch (scope_flags) {
+ case TRACE_EVENT_FLAG_HAS_ID:
+ event->set_unscoped_id(raw_id_);
+ break;
+ case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
+ event->set_local_id(raw_id_);
+ break;
+ case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
+ event->set_global_id(raw_id_);
+ break;
+ }
+ if (scope_)
+ event->set_id_scope(scope_);
+}
+
+} // namespace internal
+} // namespace perfetto
diff --git a/test/end_to_end_integrationtest.cc b/test/end_to_end_integrationtest.cc
index 52947df..92e5416 100644
--- a/test/end_to_end_integrationtest.cc
+++ b/test/end_to_end_integrationtest.cc
@@ -414,7 +414,7 @@
const uint32_t kTestTimeoutMs = 30000;
TraceConfig trace_config;
- trace_config.add_buffers()->set_size_kb(16);
+ trace_config.add_buffers()->set_size_kb(32);
trace_config.set_duration_ms(kTestTimeoutMs);
auto* ds_config = trace_config.add_data_sources()->mutable_config();
diff --git a/test/producer_socket_fuzzer.cc b/test/producer_socket_fuzzer.cc
index 46904df..5f7bc3a 100644
--- a/test/producer_socket_fuzzer.cc
+++ b/test/producer_socket_fuzzer.cc
@@ -37,8 +37,7 @@
void OnConnect(base::UnixSocket* self, bool connected) override {
PERFETTO_CHECK(connected && self->is_connected());
- self->Send(data_, size_, self->fd(),
- base::UnixSocket::BlockingMode::kBlocking);
+ self->Send(data_, size_, self->fd());
data_sent_();
}
diff --git a/test/trace_processor/track_event_tracks.textproto b/test/trace_processor/track_event_tracks.textproto
index b76fa36..65c8401 100644
--- a/test/trace_processor/track_event_tracks.textproto
+++ b/test/trace_processor/track_event_tracks.textproto
@@ -269,10 +269,26 @@
track_event {
track_uuid: 0
categories: "cat"
- name: "event1_on_t1"
+ name: "event2_on_p2"
legacy_event {
phase: 73 # 'I'
instant_event_scope: 2 # Process scope
}
}
}
+
+# And pid/tid overrides take effect even for TrackEvent type events.
+packet {
+ trusted_packet_sequence_id: 1
+ timestamp: 32000
+ track_event {
+ track_uuid: 0
+ categories: "cat"
+ name: "event2_on_t4"
+ type: 3
+ legacy_event {
+ pid_override: 5
+ tid_override: 4
+ }
+ }
+}
diff --git a/test/trace_processor/track_event_tracks_slices.out b/test/trace_processor/track_event_tracks_slices.out
index d7119df..ce31b25 100644
--- a/test/trace_processor/track_event_tracks_slices.out
+++ b/test/trace_processor/track_event_tracks_slices.out
@@ -10,4 +10,5 @@
"[NULL]","p2","[NULL]","[NULL]",21000,0,"cat","event1_on_p2",7
"[NULL]","[NULL]","t4","p2",22000,0,"cat","event1_on_t4",0
"Default Track","[NULL]","[NULL]","[NULL]",30000,0,"cat","event1_on_t1",0
-"[NULL]","p2","[NULL]","[NULL]",31000,0,"cat","event1_on_t1",6
+"[NULL]","p2","[NULL]","[NULL]",31000,0,"cat","event2_on_p2",6
+"[NULL]","[NULL]","t4","p2",32000,0,"cat","event2_on_t4",0
diff --git a/ui/deploy b/ui/deploy
index 863964b..d10d188 100755
--- a/ui/deploy
+++ b/ui/deploy
@@ -81,7 +81,7 @@
api_version: 1
threadsafe: yes
instance_class: B1
-default_expiration: "1s"
+default_expiration: "1m"
manual_scaling:
instances: 1
handlers:
diff --git a/ui/src/frontend/service_worker_controller.ts b/ui/src/frontend/service_worker_controller.ts
index 1c41ec8..55bcfdc 100644
--- a/ui/src/frontend/service_worker_controller.ts
+++ b/ui/src/frontend/service_worker_controller.ts
@@ -18,6 +18,8 @@
// The actual service worker code is in src/service_worker.
// Design doc: http://go/perfetto-offline.
+import {reportError} from '../base/logging';
+
import {globals} from './globals';
// We use a dedicated |caches| object to share a global boolean beween the main
@@ -59,29 +61,12 @@
if (sw !== this._initialWorker && this._initialWorker) {
globals.frontendLocalState.newVersionAvailable = true;
}
- } else if (
- sw.state === 'redundant' && sw !== this._initialWorker &&
- !this._bypassed) {
- // Note that upon updates, the initial SW will hit the 'redundant'
- // state by design once the new one is activated. That's why the
- // != _initialWorker above.
-
- // In the other cases, the 'redundant' state signals a failure in the
- // SW installation. This can happen, for instance, if the subresource
- // integrity check fails. In that case there doesn't seem to be any easy
- // way to get the failure output from the service worker.
- // TODO(primiano): This seems to fail in some weird ways, suppress error
- // until the root cause is found (b/148675312).
- // reportError(
- // 'Service Worker installation failed.\n' +
- // 'Please attach the JavaScript console output to the bug.');
}
}
monitorWorker(sw: ServiceWorker|null) {
if (!sw) return;
- // TODO(primiano): b/148675312
- // sw.addEventListener('error', (e) => reportError(e));
+ sw.addEventListener('error', (e) => reportError(e));
sw.addEventListener('statechange', () => this.onStateChange(sw));
this.onStateChange(sw); // Trigger updates for the current state.
}
diff --git a/ui/src/service_worker/service_worker.ts b/ui/src/service_worker/service_worker.ts
index 98e27d6..3289352 100644
--- a/ui/src/service_worker/service_worker.ts
+++ b/ui/src/service_worker/service_worker.ts
@@ -48,13 +48,119 @@
declare var self: ServiceWorkerGlobalScope;
+const CACHE_NAME = 'dist-' + UI_DIST_MAP.hex_digest.substr(0, 16);
const LOG_TAG = `ServiceWorker[${UI_DIST_MAP.hex_digest.substr(0, 16)}]: `;
-// TODO(primiano): Temporarily disabling service worker because our default
-// cache policy (1d) made the response unreliable (b/148675312).
-self.addEventListener('install', () => {
- self.skipWaiting();
+function shouldHandleHttpRequest(req: Request): boolean {
+ // Suppress warning: 'only-if-cached' can be set only with 'same-origin' mode.
+ // This seems to be a chromium bug. An internal code search suggests this is a
+ // socially acceptable workaround.
+ if (req.cache === 'only-if-cached' && req.mode !== 'same-origin') {
+ return false;
+ }
+
+ const url = new URL(req.url);
+ return req.method === 'GET' && url.origin === self.location.origin;
+}
+
+async function handleHttpRequest(req: Request): Promise<Response> {
+ if (!shouldHandleHttpRequest(req)) {
+ throw new Error(LOG_TAG + `${req.url} shouldn't have been handled`);
+ }
+
+ // We serve from the cache even if req.cache == 'no-cache'. It's a bit
+  // counter-intuitive but it's the most consistent option. If the user hits the
+ // reload button*, the browser requests the "/" index with a 'no-cache' fetch.
+ // However all the other resources (css, js, ...) are requested with a
+ // 'default' fetch (this is just how Chrome works, it's not us). If we bypass
+ // the service worker cache when we get a 'no-cache' request, we can end up in
+ // an inconsistent state where the index.html is more recent than the other
+ // resources, which is undesirable.
+ // * Only Ctrl+R. Ctrl+Shift+R will always bypass service-worker for all the
+ // requests (index.html and the rest) made in that tab.
+ try {
+ const cacheOps = {cacheName: CACHE_NAME} as CacheQueryOptions;
+ const cachedRes = await caches.match(req, cacheOps);
+ if (cachedRes) {
+ console.debug(LOG_TAG + `serving ${req.url} from cache`);
+ return cachedRes;
+ }
+ console.warn(LOG_TAG + `cache miss on ${req.url}`);
+ } catch (exc) {
+ console.error(LOG_TAG + `Cache request failed for ${req.url}`, exc);
+ }
+
+ // In any other case, just propagate the fetch on the network, which is the
+ // safe behavior.
+ console.debug(LOG_TAG + `falling back on network fetch() for ${req.url}`);
+ return fetch(req);
+}
+
+// The install() event is fired:
+// - The very first time the site is visited, after frontend/index.ts has
+// executed the serviceWorker.register() method.
+// - *After* the site is loaded, if the service_worker.js code
+// has changed (because of the hashes in UI_DIST_MAP, service_worker.js will
+// change if anything in the UI has changed).
+self.addEventListener('install', event => {
+ const doInstall = async () => {
+ if (await caches.has('BYPASS_SERVICE_WORKER')) {
+      // Throwing will prevent the installation.
+ throw new Error(LOG_TAG + 'skipping installation, bypass enabled');
+ }
+ console.log(LOG_TAG + 'installation started');
+ const cache = await caches.open(CACHE_NAME);
+ const urlsToCache: RequestInfo[] = [];
+ for (const [file, integrity] of Object.entries(UI_DIST_MAP.files)) {
+ const reqOpts:
+ RequestInit = {cache: 'reload', mode: 'same-origin', integrity};
+ urlsToCache.push(new Request(file, reqOpts));
+ if (file === 'index.html' && location.host !== 'storage.googleapis.com') {
+      // Skip caching of '/' when the UI is hosted on GCS: GCS doesn't
+      // support auto indexes and returns a 404 page on '/' that fails the
+      // integrity check.
+ urlsToCache.push(new Request('/', reqOpts));
+ }
+ }
+ await cache.addAll(urlsToCache);
+ console.log(LOG_TAG + 'installation completed');
+
+ // skipWaiting() still waits for the install to be complete. Without this
+ // call, the new version would be activated only when all tabs are closed.
+ // Instead, we ask to activate it immediately. This is safe because each
+ // service worker version uses a different cache named after the SHA256 of
+ // the contents. When the old version is activated, the activate() method
+  // below will evict the cache for the old versions. If an old tab is still
+  // open, any further request from that tab will be a cache-miss and go
+  // through the network (which is inconsistent, but not the end of the world).
+ self.skipWaiting();
+ };
+ event.waitUntil(doInstall());
});
-self.console.debug(LOG_TAG + 'disabled due to b/148675312');
\ No newline at end of file
+self.addEventListener('activate', (event) => {
+ console.warn(LOG_TAG + 'activated');
+ const doActivate = async () => {
+ // Clear old caches.
+ for (const key of await caches.keys()) {
+ if (key !== CACHE_NAME) await caches.delete(key);
+ }
+ // This makes a difference only for the very first load, when no service
+ // worker is present. In all the other cases the skipWaiting() will hot-swap
+  // the active service worker anyway.
+ await self.clients.claim();
+ };
+ event.waitUntil(doActivate());
+});
+
+self.addEventListener('fetch', event => {
+ // The early return here will cause the browser to fall back on standard
+ // network-based fetch.
+ if (!shouldHandleHttpRequest(event.request)) {
+ console.debug(LOG_TAG + `serving ${event.request.url} from network`);
+ return;
+ }
+
+ event.respondWith(handleHttpRequest(event.request));
+});