diff --git a/support/ebpf/dotnet_tracer.ebpf.c b/support/ebpf/dotnet_tracer.ebpf.c
index 7ebac864..9b6ca040 100644
--- a/support/ebpf/dotnet_tracer.ebpf.c
+++ b/support/ebpf/dotnet_tracer.ebpf.c
@@ -244,7 +244,7 @@ ErrorCode unwind_one_dotnet_frame(PerCPURecord *record, DotnetProcInfo *vi, bool
 // unwind_dotnet is the entry point for tracing when invoked from the native tracer
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // dotnet stack frames to the trace object for the current CPU.
-SEC("perf_event/unwind_dotnet")
+static inline __attribute__((__always_inline__))
 int unwind_dotnet(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record) {
@@ -289,3 +289,4 @@ int unwind_dotnet(struct pt_regs *ctx) {
   DEBUG_PRINT("dotnet: tail call for next frame unwinder (%d) failed", unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_dotnet)
diff --git a/support/ebpf/hotspot_tracer.ebpf.c b/support/ebpf/hotspot_tracer.ebpf.c
index 9ae1fdc7..54290e11 100644
--- a/support/ebpf/hotspot_tracer.ebpf.c
+++ b/support/ebpf/hotspot_tracer.ebpf.c
@@ -890,7 +890,7 @@ static ErrorCode hotspot_unwind_one_frame(PerCPURecord *record, HotspotProcInfo
 // unwind_hotspot is the entry point for tracing when invoked from the native tracer
 // and it recursive unwinds all HotSpot frames and then jumps back to unwind further
 // native frames that follow.
-SEC("perf_event/unwind_hotspot")
+static inline __attribute__((__always_inline__))
 int unwind_hotspot(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -927,3 +927,4 @@ int unwind_hotspot(struct pt_regs *ctx) {
   DEBUG_PRINT("jvm: tail call for next frame unwinder (%d) failed", unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_hotspot)
diff --git a/support/ebpf/interpreter_dispatcher.ebpf.c b/support/ebpf/interpreter_dispatcher.ebpf.c
index 1589a60d..fcb4b329 100644
--- a/support/ebpf/interpreter_dispatcher.ebpf.c
+++ b/support/ebpf/interpreter_dispatcher.ebpf.c
@@ -172,7 +172,8 @@ void maybe_add_apm_info(Trace *trace) {
     trace->apm_transaction_id.as_int, corr_buf.trace_flags);
 }
 
-SEC("perf_event/unwind_stop")
+// unwind_stop is the tail call destination for PROG_UNWIND_STOP.
+static inline __attribute__((__always_inline__))
 int unwind_stop(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -238,6 +239,7 @@ int unwind_stop(struct pt_regs *ctx) {
 
   return 0;
 }
+MULTI_USE_FUNC(unwind_stop)
 
 char _license[] SEC("license") = "GPL";
 // this number will be interpreted by the elf loader
diff --git a/support/ebpf/native_stack_trace.ebpf.c b/support/ebpf/native_stack_trace.ebpf.c
index bfd7f6f6..964768c4 100644
--- a/support/ebpf/native_stack_trace.ebpf.c
+++ b/support/ebpf/native_stack_trace.ebpf.c
@@ -575,7 +575,8 @@ static ErrorCode unwind_one_frame(u64 pid, u32 frame_idx, struct UnwindState *st
 #error unsupported architecture
 #endif
 
-SEC("perf_event/unwind_native")
+// unwind_native is the tail call destination for PROG_UNWIND_NATIVE.
+static inline __attribute__((__always_inline__))
 int unwind_native(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -636,3 +637,4 @@ int native_tracer_entry(struct bpf_perf_event_data *ctx) {
   u64 ts = bpf_ktime_get_ns();
   return collect_trace((struct pt_regs*) &ctx->regs, TRACE_SAMPLING, pid, tid, ts, 0);
 }
+MULTI_USE_FUNC(unwind_native)
diff --git a/support/ebpf/perl_tracer.ebpf.c b/support/ebpf/perl_tracer.ebpf.c
index c498341b..1e143468 100644
--- a/support/ebpf/perl_tracer.ebpf.c
+++ b/support/ebpf/perl_tracer.ebpf.c
@@ -356,7 +356,7 @@ int walk_perl_stack(PerCPURecord *record, const PerlProcInfo *perlinfo) {
 // unwind_perl is the entry point for tracing when invoked from the native tracer
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // Perl stack frames to the trace object for the current CPU.
-SEC("perf_event/unwind_perl")
+static inline __attribute__((__always_inline__))
 int unwind_perl(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record) {
@@ -426,3 +426,4 @@ int unwind_perl(struct pt_regs *ctx) {
   tail_call(ctx, unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_perl)
diff --git a/support/ebpf/php_tracer.ebpf.c b/support/ebpf/php_tracer.ebpf.c
index 677cd185..506f63e3 100644
--- a/support/ebpf/php_tracer.ebpf.c
+++ b/support/ebpf/php_tracer.ebpf.c
@@ -182,7 +182,8 @@ int walk_php_stack(PerCPURecord *record, PHPProcInfo *phpinfo, bool is_jitted) {
   return unwinder;
 }
 
-SEC("perf_event/unwind_php")
+// unwind_php is the tail call destination for PROG_UNWIND_PHP.
+static inline __attribute__((__always_inline__))
 int unwind_php(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -239,3 +240,4 @@ int unwind_php(struct pt_regs *ctx) {
   tail_call(ctx, unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_php)
diff --git a/support/ebpf/python_tracer.ebpf.c b/support/ebpf/python_tracer.ebpf.c
index d99147be..9d03375f 100644
--- a/support/ebpf/python_tracer.ebpf.c
+++ b/support/ebpf/python_tracer.ebpf.c
@@ -276,7 +276,7 @@ ErrorCode get_PyFrame(const PyProcInfo *pyinfo, void **frame) {
 // unwind_python is the entry point for tracing when invoked from the native tracer
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // Python stack frames to the trace object for the current CPU.
-SEC("perf_event/unwind_python")
+static inline __attribute__((__always_inline__))
 int unwind_python(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -318,3 +318,4 @@ int unwind_python(struct pt_regs *ctx) {
   tail_call(ctx, unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_python)
diff --git a/support/ebpf/ruby_tracer.ebpf.c b/support/ebpf/ruby_tracer.ebpf.c
index 41ecacaa..57fa1101 100644
--- a/support/ebpf/ruby_tracer.ebpf.c
+++ b/support/ebpf/ruby_tracer.ebpf.c
@@ -216,7 +216,8 @@ ErrorCode walk_ruby_stack(PerCPURecord *record, const RubyProcInfo *rubyinfo,
   return ERR_OK;
 }
 
-SEC("perf_event/unwind_ruby")
+// unwind_ruby is the tail call destination for PROG_UNWIND_RUBY.
+static inline __attribute__((__always_inline__))
 int unwind_ruby(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record)
@@ -273,3 +274,4 @@ int unwind_ruby(struct pt_regs *ctx) {
   tail_call(ctx, unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_ruby)
diff --git a/support/ebpf/tracemgmt.h b/support/ebpf/tracemgmt.h
index 1a5b8d4e..27c00ba6 100644
--- a/support/ebpf/tracemgmt.h
+++ b/support/ebpf/tracemgmt.h
@@ -10,6 +10,20 @@
 #include "types.h"
 #include "errors.h"
 
+// MULTI_USE_FUNC generates perf event and kprobe eBPF programs
+// for a given function.
+#define MULTI_USE_FUNC(func_name) \
+  SEC("perf_event/"#func_name) \
+  int perf_##func_name(struct pt_regs *ctx) { \
+    return func_name(ctx); \
+  } \
+  \
+  SEC("kprobe/"#func_name) \
+  int kprobe_##func_name(struct pt_regs *ctx) { \
+    return func_name(ctx); \
+  }
+
+
 // increment_metric increments the value of the given metricID by 1
 static inline __attribute__((__always_inline__))
 void increment_metric(u32 metricID) {
diff --git a/support/ebpf/tracer.ebpf.release.amd64 b/support/ebpf/tracer.ebpf.release.amd64
index b286090e..b85f454d 100644
Binary files a/support/ebpf/tracer.ebpf.release.amd64 and b/support/ebpf/tracer.ebpf.release.amd64 differ
diff --git a/support/ebpf/tracer.ebpf.release.arm64 b/support/ebpf/tracer.ebpf.release.arm64
index 536f7c72..71cfea1a 100644
Binary files a/support/ebpf/tracer.ebpf.release.arm64 and b/support/ebpf/tracer.ebpf.release.arm64 differ
diff --git a/support/ebpf/v8_tracer.ebpf.c b/support/ebpf/v8_tracer.ebpf.c
index d6da2a71..1faa1834 100644
--- a/support/ebpf/v8_tracer.ebpf.c
+++ b/support/ebpf/v8_tracer.ebpf.c
@@ -284,7 +284,7 @@ ErrorCode unwind_one_v8_frame(PerCPURecord *record, V8ProcInfo *vi, bool top) {
 // unwind_v8 is the entry point for tracing when invoked from the native tracer
 // or interpreter dispatcher. It does not reset the trace object and will append the
 // V8 stack frames to the trace object for the current CPU.
-SEC("perf_event/unwind_v8")
+static inline __attribute__((__always_inline__))
 int unwind_v8(struct pt_regs *ctx) {
   PerCPURecord *record = get_per_cpu_record();
   if (!record) {
@@ -328,3 +328,4 @@ int unwind_v8(struct pt_regs *ctx) {
   DEBUG_PRINT("v8: tail call for next frame unwinder (%d) failed", unwinder);
   return -1;
 }
+MULTI_USE_FUNC(unwind_v8)
diff --git a/tracer/tracer.go b/tracer/tracer.go
index 4654d0fb..7bbe5481 100644
--- a/tracer/tracer.go
+++ b/tracer/tracer.go
@@ -602,9 +602,14 @@ func loadPerfUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf.P
 			continue
 		}
 
-		progSpec, ok := coll.Programs[unwindProg.name]
+		unwindProgName := unwindProg.name
+		if !unwindProg.noTailCallTarget {
+			unwindProgName = "perf_" + unwindProg.name
+		}
+
+		progSpec, ok := coll.Programs[unwindProgName]
 		if !ok {
-			return fmt.Errorf("program %s does not exist", unwindProg.name)
+			return fmt.Errorf("program %s does not exist", unwindProgName)
 		}
 
 		if err := loadProgram(ebpfProgs, tailcallMap, unwindProg.progID, progSpec,
@@ -666,9 +671,14 @@ func loadKProbeUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf
 			continue
 		}
 
-		progSpec, ok := coll.Programs[unwindProg.name]
+		unwindProgName := unwindProg.name
+		if !unwindProg.noTailCallTarget {
+			unwindProgName = "kprobe_" + unwindProg.name
+		}
+
+		progSpec, ok := coll.Programs[unwindProgName]
 		if !ok {
-			return fmt.Errorf("program %s does not exist", unwindProg.name)
+			return fmt.Errorf("program %s does not exist", unwindProgName)
 		}
 
 		// Replace the prog array for the tail calls.
@@ -679,15 +689,6 @@ func loadKProbeUnwinders(coll *cebpf.CollectionSpec, ebpfProgs map[string]*cebpf
 			}
 		}
 
-		// All the tail call targets are perf event programs. To be able to tail call them
-		// from a kprobe, adjust their specification.
-		if !unwindProg.noTailCallTarget {
-			// Adjust program type
-			progSpec.Type = cebpf.Kprobe
-
-			// Adjust program name for easier debugging
-			progSpec.Name = "kp_" + progSpec.Name
-		}
 		if err := loadProgram(ebpfProgs, tailcallMap, unwindProg.progID, progSpec,
 			programOptions, unwindProg.noTailCallTarget); err != nil {
 			return err
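Each unwinder body is now a plain always-inline helper, and MULTI_USE_FUNC stamps out the two real eBPF entry points. For reference, expanding MULTI_USE_FUNC(unwind_native) with the macro above yields roughly the following (illustrative preprocessor expansion, not code that appears in the patch):

// Generated perf_event program: thin wrapper around the inlined unwinder body.
SEC("perf_event/unwind_native")
int perf_unwind_native(struct pt_regs *ctx) {
  return unwind_native(ctx);
}

// Generated kprobe program: same body, but emitted into a kprobe ELF section so it
// can be tail called from kprobe-attached entry points without rewriting the spec.
SEC("kprobe/unwind_native")
int kprobe_unwind_native(struct pt_regs *ctx) {
  return unwind_native(ctx);
}

These "perf_"- and "kprobe_"-prefixed names are exactly what loadPerfUnwinders and loadKProbeUnwinders now look up in coll.Programs for tail call targets, which is why the old progSpec.Type / progSpec.Name rewriting in loadKProbeUnwinders can be dropped.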