@@ -20,9 +20,6 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 #include <bpf/btf.h>
-#ifdef HAVE_BPF_SKEL
-#include "bpf_skel/augmented_raw_syscalls.skel.h"
-#endif
 #endif
 #include "util/bpf_map.h"
 #include "util/rlimit.h"
@@ -155,9 +152,6 @@ struct trace {
 				*bpf_output;
 		} events;
 	} syscalls;
-#ifdef HAVE_BPF_SKEL
-	struct augmented_raw_syscalls_bpf *skel;
-#endif
 #ifdef HAVE_LIBBPF_SUPPORT
 	struct btf *btf;
 #endif
@@ -3703,7 +3697,10 @@ static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
 	goto out;
 }
 
-#ifdef HAVE_BPF_SKEL
+#ifdef HAVE_LIBBPF_SUPPORT
+
+static struct bpf_program *unaugmented_prog;
+
 static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
 {
 	int id;
@@ -3721,46 +3718,28 @@ static int syscall_arg_fmt__cache_btf_struct(struct syscall_arg_fmt *arg_fmt, struct btf *btf, char *type)
 	return 0;
 }
 
-static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
-{
-	struct bpf_program *pos, *prog = NULL;
-	const char *sec_name;
-
-	if (trace->skel->obj == NULL)
-		return NULL;
-
-	bpf_object__for_each_program(pos, trace->skel->obj) {
-		sec_name = bpf_program__section_name(pos);
-		if (sec_name && !strcmp(sec_name, name)) {
-			prog = pos;
-			break;
-		}
-	}
-
-	return prog;
-}
-
-static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
+static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace __maybe_unused,
+							struct syscall *sc,
 							const char *prog_name, const char *type)
 {
 	struct bpf_program *prog;
 
 	if (prog_name == NULL) {
 		char default_prog_name[256];
 		scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->name);
-		prog = trace__find_bpf_program_by_title(trace, default_prog_name);
+		prog = augmented_syscalls__find_by_title(default_prog_name);
 		if (prog != NULL)
 			goto out_found;
 		if (sc->fmt && sc->fmt->alias) {
 			scnprintf(default_prog_name, sizeof(default_prog_name), "tp/syscalls/sys_%s_%s", type, sc->fmt->alias);
-			prog = trace__find_bpf_program_by_title(trace, default_prog_name);
+			prog = augmented_syscalls__find_by_title(default_prog_name);
 			if (prog != NULL)
 				goto out_found;
 		}
 		goto out_unaugmented;
 	}
 
-	prog = trace__find_bpf_program_by_title(trace, prog_name);
+	prog = augmented_syscalls__find_by_title(prog_name);
 
 	if (prog != NULL) {
 out_found:
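The helper that replaces trace__find_bpf_program_by_title() is not shown in this diff; it presumably lands in a new util/ module that now owns the skeleton. Below is a minimal sketch of what augmented_syscalls__find_by_title() could look like if it simply rehomes the deleted loop; the module-local skel variable and the file placement are assumptions, not taken from this diff:

#include <string.h>
#include <bpf/libbpf.h>
#include "bpf_skel/augmented_raw_syscalls.skel.h"

/* Assumed module-local state; this diff only shows the call sites. */
static struct augmented_raw_syscalls_bpf *skel;

struct bpf_program *augmented_syscalls__find_by_title(const char *name)
{
	struct bpf_program *pos;

	if (skel == NULL || skel->obj == NULL)
		return NULL;

	bpf_object__for_each_program(pos, skel->obj) {
		const char *sec_name = bpf_program__section_name(pos);

		/* Programs are matched by their SEC() name, e.g. "tp/syscalls/sys_enter_open". */
		if (sec_name && !strcmp(sec_name, name))
			return pos;
	}

	return NULL;
}

Dropping the struct trace parameter is what lets trace__find_syscall_bpf_prog() above mark its trace argument __maybe_unused.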
@@ -3770,7 +3749,7 @@ static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
 	pr_debug("Couldn't find BPF prog \"%s\" to associate with syscalls:sys_%s_%s, not augmenting it\n",
 		 prog_name, type, sc->name);
 out_unaugmented:
-	return trace->skel->progs.syscall_unaugmented;
+	return unaugmented_prog;
 }
 
 static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
@@ -3787,13 +3766,13 @@ static void trace__init_syscall_bpf_progs(struct trace *trace, int e_machine, int id)
 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int e_machine, int id)
 {
 	struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
-	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
+	return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(unaugmented_prog);
 }
 
 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int e_machine, int id)
 {
 	struct syscall *sc = trace__syscall_info(trace, NULL, e_machine, id);
-	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_unaugmented);
+	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(unaugmented_prog);
 }
 
 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int e_machine, int key, unsigned int *beauty_array)
@@ -3903,7 +3882,7 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
 		bool is_candidate = false;
 
 		if (pair == NULL || pair->id == sc->id ||
-		    pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
+		    pair->bpf_prog.sys_enter == unaugmented_prog)
 			continue;
 
 		for (field = sc->args, candidate_field = pair->args;
@@ -3969,7 +3948,7 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
 		 */
 		if (pair_prog == NULL) {
 			pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_enter : NULL, "enter");
-			if (pair_prog == trace->skel->progs.syscall_unaugmented)
+			if (pair_prog == unaugmented_prog)
 				goto next_candidate;
 		}
 
@@ -3985,12 +3964,17 @@ static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
 
 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
 {
-	int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
-	int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
-	int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
+	int map_enter_fd;
+	int map_exit_fd;
+	int beauty_map_fd;
 	int err = 0;
 	unsigned int beauty_array[6];
 
+	if (augmented_syscalls__get_map_fds(&map_enter_fd, &map_exit_fd, &beauty_map_fd) < 0)
+		return -1;
+
+	unaugmented_prog = augmented_syscalls__unaugmented();
+
 	for (int i = 0, num_idx = syscalltbl__num_idx(e_machine); i < num_idx; ++i) {
 		int prog_fd, key = syscalltbl__id_at_idx(e_machine, i);
 
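augmented_syscalls__get_map_fds() and augmented_syscalls__unaugmented() presumably just wrap the three bpf_map__fd() lookups and the syscall_unaugmented program reference that this hunk removes, failing when the skeleton was never loaded. A sketch under that assumption, reusing the skel variable hypothesized earlier:

int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd)
{
	if (skel == NULL)
		return -1;

	/* Map names come straight from the deleted open-coded lookups. */
	*enter_fd = bpf_map__fd(skel->maps.syscalls_sys_enter);
	*exit_fd = bpf_map__fd(skel->maps.syscalls_sys_exit);
	*beauty_fd = bpf_map__fd(skel->maps.beauty_map_enter);

	return (*enter_fd < 0 || *exit_fd < 0 || *beauty_fd < 0) ? -1 : 0;
}

struct bpf_program *augmented_syscalls__unaugmented(void)
{
	return skel ? skel->progs.syscall_unaugmented : NULL;
}

The early return on a failed augmented_syscalls__get_map_fds() is also what makes the caller safe when augmentation was skipped at startup.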
@@ -4060,7 +4044,7 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
 		 * For now we're just reusing the sys_enter prog, and if it
 		 * already has an augmenter, we don't need to find one.
 		 */
-		if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
+		if (sc->bpf_prog.sys_enter != unaugmented_prog)
 			continue;
 
 		/*
@@ -4085,7 +4069,13 @@ static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace, int e_machine)
 
 	return err;
 }
-#endif // HAVE_BPF_SKEL
+#else // !HAVE_LIBBPF_SUPPORT
+static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace __maybe_unused,
+						    int e_machine __maybe_unused)
+{
+	return -1;
+}
+#endif // HAVE_LIBBPF_SUPPORT
 
 static int trace__set_ev_qualifier_filter(struct trace *trace)
 {
@@ -4094,24 +4084,6 @@ static int trace__set_ev_qualifier_filter(struct trace *trace)
 	return 0;
 }
 
-static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
-				    size_t npids __maybe_unused, pid_t *pids __maybe_unused)
-{
-	int err = 0;
-#ifdef HAVE_LIBBPF_SUPPORT
-	bool value = true;
-	int map_fd = bpf_map__fd(map);
-	size_t i;
-
-	for (i = 0; i < npids; ++i) {
-		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
-		if (err)
-			break;
-	}
-#endif
-	return err;
-}
-
 static int trace__set_filter_loop_pids(struct trace *trace)
 {
 	unsigned int nr = 1, err;
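The deleted bpf_map__set_filter_pids() carried its own #ifdef because builtin-trace.c also builds without libbpf; the replacement can drop that, since the module owning it would only be compiled when BPF support is in. A sketch of augmented_syscalls__set_filter_pids(), assuming it targets the skeleton's pids_filtered map that the trace__run() hunk below stops wiring up by hand:

int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids)
{
	bool value = true;
	int err = 0, map_fd;

	if (skel == NULL)
		return 0; /* augmentation is off; nothing to filter */

	map_fd = bpf_map__fd(skel->maps.pids_filtered);

	for (unsigned int i = 0; i < nr; ++i) {
		err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
		if (err)
			break;
	}

	return err;
}

Returning 0 when no skeleton is loaded preserves the old behavior, where callers guarded the call with trace->filter_pids.map before this change made it unconditional.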
@@ -4140,8 +4112,8 @@ static int trace__set_filter_loop_pids(struct trace *trace)
 	thread__put(thread);
 
 	err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
-	if (!err && trace->filter_pids.map)
-		err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
+	if (!err)
+		err = augmented_syscalls__set_filter_pids(nr, pids);
 
 	return err;
 }
@@ -4158,8 +4130,8 @@ static int trace__set_filter_pids(struct trace *trace)
 	if (trace->filter_pids.nr > 0) {
 		err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
 						    trace->filter_pids.entries);
-		if (!err && trace->filter_pids.map) {
-			err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
+		if (!err) {
+			err = augmented_syscalls__set_filter_pids(trace->filter_pids.nr,
 						       trace->filter_pids.entries);
 		}
 	} else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
@@ -4482,41 +4454,18 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	err = evlist__open(evlist);
 	if (err < 0)
 		goto out_error_open;
-#ifdef HAVE_BPF_SKEL
-	if (trace->syscalls.events.bpf_output) {
-		struct perf_cpu cpu;
 
-		/*
-		 * Set up the __augmented_syscalls__ BPF map to hold for each
-		 * CPU the bpf-output event's file descriptor.
-		 */
-		perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
-			int mycpu = cpu.cpu;
-
-			bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
-					&mycpu, sizeof(mycpu),
-					xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
-						       mycpu, 0),
-					sizeof(__u32), BPF_ANY);
-		}
-	}
+	augmented_syscalls__setup_bpf_output();
 
-	if (trace->skel)
-		trace->filter_pids.map = trace->skel->maps.pids_filtered;
-#endif
 	err = trace__set_filter_pids(trace);
 	if (err < 0)
 		goto out_error_mem;
 
-#ifdef HAVE_BPF_SKEL
-	if (trace->skel && trace->skel->progs.sys_enter) {
-		/*
-		 * TODO: Initialize for all host binary machine types, not just
-		 * those matching the perf binary.
-		 */
-		trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
-	}
-#endif
+	/*
+	 * TODO: Initialize for all host binary machine types, not just
+	 * those matching the perf binary.
+	 */
+	trace__init_syscalls_bpf_prog_array_maps(trace, EM_HOST);
 
 	if (trace->ev_qualifier_ids.nr > 0) {
 		err = trace__set_ev_qualifier_filter(trace);
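augmented_syscalls__setup_bpf_output() can absorb the deleted per-CPU loop wholesale; the only state it needs is the bpf-output evsel, which the module could remember when augmented_syscalls__create_bpf_output() runs. A sketch, where the module-local bpf_output variable is an assumption:

/* Assumed to be saved by augmented_syscalls__create_bpf_output(). */
static struct evsel *bpf_output;

void augmented_syscalls__setup_bpf_output(void)
{
	struct perf_cpu cpu;
	int i;

	if (skel == NULL || bpf_output == NULL)
		return;

	/*
	 * Set up the __augmented_syscalls__ BPF map to hold for each
	 * CPU the bpf-output event's file descriptor.
	 */
	perf_cpu_map__for_each_cpu(cpu, i, bpf_output->core.cpus) {
		int mycpu = cpu.cpu;

		bpf_map__update_elem(skel->maps.__augmented_syscalls__,
				     &mycpu, sizeof(mycpu),
				     xyarray__entry(bpf_output->core.fd, mycpu, 0),
				     sizeof(__u32), BPF_ANY);
	}
}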
@@ -5379,18 +5328,6 @@ static void trace__exit(struct trace *trace)
 #endif
 }
 
-#ifdef HAVE_BPF_SKEL
-static int bpf__setup_bpf_output(struct evlist *evlist)
-{
-	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");
-
-	if (err)
-		pr_debug("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");
-
-	return err;
-}
-#endif
-
 int cmd_trace(int argc, const char **argv)
 {
 	const char *trace_usage[] = {
@@ -5587,7 +5524,6 @@ int cmd_trace(int argc, const char **argv)
 			       "cgroup monitoring only available in system-wide mode");
 	}
 
-#ifdef HAVE_BPF_SKEL
 	if (!trace.trace_syscalls)
 		goto skip_augmentation;
 
@@ -5606,42 +5542,17 @@ int cmd_trace(int argc, const char **argv)
 		goto skip_augmentation;
 	}
 
-	trace.skel = augmented_raw_syscalls_bpf__open();
-	if (!trace.skel) {
-		pr_debug("Failed to open augmented syscalls BPF skeleton");
-	} else {
-		/*
-		 * Disable attaching the BPF programs except for sys_enter and
-		 * sys_exit that tail call into this as necessary.
-		 */
-		struct bpf_program *prog;
+	err = augmented_syscalls__prepare();
+	if (err < 0)
+		goto skip_augmentation;
 
-		bpf_object__for_each_program(prog, trace.skel->obj) {
-			if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
-				bpf_program__set_autoattach(prog, /*autoattach=*/false);
-		}
+	trace__add_syscall_newtp(&trace);
 
-		err = augmented_raw_syscalls_bpf__load(trace.skel);
+	err = augmented_syscalls__create_bpf_output(trace.evlist);
+	if (err == 0)
+		trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
 
-		if (err < 0) {
-			libbpf_strerror(err, bf, sizeof(bf));
-			pr_debug("Failed to load augmented syscalls BPF skeleton: %s\n", bf);
-		} else {
-			augmented_raw_syscalls_bpf__attach(trace.skel);
-			trace__add_syscall_newtp(&trace);
-		}
-	}
-
-	err = bpf__setup_bpf_output(trace.evlist);
-	if (err) {
-		libbpf_strerror(err, bf, sizeof(bf));
-		pr_err("ERROR: Setup BPF output event failed: %s\n", bf);
-		goto out;
-	}
-	trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
-	assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
 skip_augmentation:
-#endif
 	err = -1;
 
 	if (trace.trace_pgfaults) {
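augmented_syscalls__prepare() plausibly bundles the open/autoattach/load/attach sequence deleted here, and augmented_syscalls__create_bpf_output() the old bpf__setup_bpf_output(); a sketch under those assumptions, built only from the code this hunk removes:

int augmented_syscalls__prepare(void)
{
	struct bpf_program *prog;
	int err;

	skel = augmented_raw_syscalls_bpf__open();
	if (skel == NULL) {
		pr_debug("Failed to open augmented syscalls BPF skeleton\n");
		return -1;
	}

	/*
	 * Disable attaching the BPF programs except for sys_enter and
	 * sys_exit that tail call into this as necessary.
	 */
	bpf_object__for_each_program(prog, skel->obj) {
		if (prog != skel->progs.sys_enter && prog != skel->progs.sys_exit)
			bpf_program__set_autoattach(prog, /*autoattach=*/false);
	}

	err = augmented_raw_syscalls_bpf__load(skel);
	if (err < 0) {
		pr_debug("Failed to load augmented syscalls BPF skeleton\n");
		return err;
	}

	return augmented_raw_syscalls_bpf__attach(skel);
}

int augmented_syscalls__create_bpf_output(struct evlist *evlist)
{
	int err = parse_event(evlist, "bpf-output/no-inherit=1,name=__augmented_syscalls__/");

	if (err)
		pr_err("ERROR: failed to create the \"__augmented_syscalls__\" bpf-output event\n");

	return err;
}

Note the control-flow change this refactor enables: a failed open or load now funnels into skip_augmentation instead of being tolerated, and the old evlist__last()/assert() sequence collapses into the if (err == 0) assignment above.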
@@ -5833,8 +5744,6 @@ int cmd_trace(int argc, const char **argv)
 		fclose(trace.output);
 out:
 	trace__exit(&trace);
-#ifdef HAVE_BPF_SKEL
-	augmented_raw_syscalls_bpf__destroy(trace.skel);
-#endif
+	augmented_syscalls__cleanup();
 	return err;
 }
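Taken together, the call sites in this diff imply an interface roughly like the following. The header name is an assumption (something like util/trace_augment.h); the prototypes follow from how builtin-trace.c now invokes the helpers. Since several calls are no longer wrapped in #ifdef, the non-libbpf build would also need inline no-op stubs, mirroring the trace__init_syscalls_bpf_prog_array_maps() fallback added above:

/* util/trace_augment.h (assumed name); prototypes inferred from the call sites */
#ifndef __PERF_TRACE_AUGMENT_H
#define __PERF_TRACE_AUGMENT_H

#include <sys/types.h>	/* pid_t */

struct bpf_program;
struct evlist;

int augmented_syscalls__prepare(void);
int augmented_syscalls__create_bpf_output(struct evlist *evlist);
void augmented_syscalls__setup_bpf_output(void);
int augmented_syscalls__set_filter_pids(unsigned int nr, pid_t *pids);
int augmented_syscalls__get_map_fds(int *enter_fd, int *exit_fd, int *beauty_fd);
struct bpf_program *augmented_syscalls__find_by_title(const char *name);
struct bpf_program *augmented_syscalls__unaugmented(void);
void augmented_syscalls__cleanup(void);

#endif /* __PERF_TRACE_AUGMENT_H */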