@@ -82,7 +82,7 @@ struct event_t {
     u64 ts;
     u64 print_skb_id;
     u64 print_shinfo_id;
-    // u64 print_bpf_map_id;
+    u64 print_bpfmap_id;
     struct skb_meta meta;
     struct tuple tuple;
     s64 print_stack_id;
@@ -144,8 +144,7 @@ struct config {
     u8 output_shinfo: 1;
     u8 output_stack: 1;
     u8 output_caller: 1;
-    u8 output_bpf_map: 1;
-    u8 output_unused: 1;
+    u8 output_unused: 2;
     u8 is_set: 1;
     u8 track_skb: 1;
     u8 track_skb_by_stackid: 1;
@@ -166,6 +165,15 @@ struct {
     __uint(value_size, MAX_STACK_DEPTH * sizeof(u64));
 } print_stack_map SEC(".maps");
 
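+/* Per-CPU scratch slot to carry an event_t from a kprobe to its matching
+ * kretprobe, so the return value can be attached before the event is sent. */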
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __uint(max_entries, 1);
+    __type(key, u32);
+    __type(value, struct event_t);
+} event_stash SEC(".maps");
+
 struct print_skb_value {
     u32 len;
     char str[PRINT_SKB_STR_SIZE];
@@ -199,6 +207,32 @@ struct {
     __type(value, struct print_shinfo_value);
 } print_shinfo_map SEC(".maps");
 
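+/* Snapshot of a bpf map operation: map metadata plus the raw key/value
+ * bytes involved, each capped at 128 bytes. */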
+struct print_bpfmap_value {
+    u32 id;
+    char name[16];
+    u32 key_size;
+    u32 value_size;
+    u8 key[128];
+    u8 value[128];
+} __attribute__((packed));
+
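+/* Per-CPU counter used to allocate unique IDs for map-op events. */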
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __uint(max_entries, 1);
+    __type(key, u32);
+    __type(value, u32);
+} print_bpfmap_id_map SEC(".maps");
+
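+/* Event ID -> captured map-op details, read back by userspace. */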
+struct {
+    __uint(type, BPF_MAP_TYPE_HASH);
+    __type(key, u64);
+    __type(value, struct print_bpfmap_value);
+} print_bpfmap_map SEC(".maps");
+
 static __always_inline u32
 get_netns(struct sk_buff *skb) {
     u32 netns = BPF_CORE_READ(skb, dev, nd_net.net, ns.inum);
@@ -517,6 +551,9 @@ handle_everything(struct sk_buff *skb, void *ctx, struct event_t *event, u64 *_s
         bpf_map_update_elem(&skb_stackid, &skb, &stackid, BPF_ANY);
     }
 
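+    /* Record the skb pointer centrally so every handler gets it, instead
+     * of each call site setting it individually. */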
+    event->skb_addr = (u64) skb;
     event->pid = bpf_get_current_pid_tgid() >> 32;
     event->ts = bpf_ktime_get_ns();
     event->cpu_id = bpf_get_smp_processor_id();
@@ -531,20 +568,12 @@ kprobe_skb(struct sk_buff *skb, struct pt_regs *ctx, const bool has_get_func_ip,
     if (!handle_everything(skb, ctx, &event, _stackid, true))
         return BPF_OK;
 
-    event.skb_addr = (u64) skb;
     event.addr = has_get_func_ip ? bpf_get_func_ip(ctx) : PT_REGS_IP(ctx);
     event.param_second = PT_REGS_PARM2(ctx);
     event.param_third = PT_REGS_PARM3(ctx);
     if (CFG.output_caller)
         bpf_probe_read_kernel(&event.caller_addr, sizeof(event.caller_addr), (void *)PT_REGS_SP(ctx));
 
-    if (CFG.output_bpf_map) {
-        // TODO@gray: kernel>=5.15
-        __u64 cookie = bpf_get_attach_cookie(ctx);
-        if (cookie)
-            set_bpf_map(ctx, cookie, NULL);
-    }
-
     bpf_map_push_elem(&events, &event, BPF_EXIST);
 
     return BPF_OK;
@@ -631,7 +660,6 @@ int BPF_PROG(fentry_tc, struct sk_buff *skb) {
     if (!handle_everything(skb, ctx, &event, NULL, false))
         return BPF_OK;
 
-    event.skb_addr = (u64) skb;
     event.addr = BPF_PROG_ADDR;
     event.type = EVENT_TYPE_TC;
     bpf_map_push_elem(&events, &event, BPF_EXIST);
@@ -781,4 +809,134 @@ int kretprobe_veth_convert_skb_to_xdp_buff(struct pt_regs *ctx) {
     return BPF_OK;
 }
 
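+/* Fills the fields shared by the lookup/update/delete probes: allocate a
+ * unique event ID, then snapshot the map's metadata and the key bytes from
+ * the probed function's first two arguments. */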
+static __always_inline void
+set_common_bpfmap_info(struct pt_regs *ctx, u64 *event_id,
+                       struct print_bpfmap_value *map_value) {
+    *event_id = sync_fetch_and_add(&print_bpfmap_id_map);
+
+    struct bpf_map *map = (struct bpf_map *)PT_REGS_PARM1(ctx);
+    BPF_CORE_READ_INTO(&map_value->id, map, id);
+    BPF_CORE_READ_STR_INTO(&map_value->name, map, name);
+    BPF_CORE_READ_INTO(&map_value->key_size, map, key_size);
+    BPF_CORE_READ_INTO(&map_value->value_size, map, value_size);
+    bpf_probe_read_kernel(&map_value->key, sizeof(map_value->key), (void *)PT_REGS_PARM2(ctx));
+}
+
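+/* The looked-up value only exists once the helper returns, so lookups are
+ * traced in two halves: the kprobe captures map/key info and stashes the
+ * event, and the kretprobe attaches the returned value and emits it. */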
+SEC("kprobe/bpf_map_lookup")
+int kprobe_bpf_map_lookup(struct pt_regs *ctx) {
+    u64 stackid = get_stackid(ctx, true);
+
+    struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
+    if (skb && *skb) {
+        struct event_t event = {};
+
+        event.addr = PT_REGS_IP(ctx);
+        if (!handle_everything(*skb, ctx, &event, &stackid, true))
+            return BPF_OK;
+
+        if (CFG.output_caller)
+            bpf_probe_read_kernel(&event.caller_addr,
+                                  sizeof(event.caller_addr),
+                                  (void *)PT_REGS_SP(ctx));
+
+        /* static keeps this 284-byte packed struct off the 512-byte BPF stack. */
+        static struct print_bpfmap_value map_value = {};
+        set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &map_value);
+
+        bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &map_value, BPF_ANY);
+        bpf_map_update_elem(&event_stash, &ZERO, &event, BPF_ANY);
+    }
+
+    return BPF_OK;
+}
+
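+/* Exit half of the lookup trace: copy the value behind the returned
+ * pointer into the stashed record and push the event to userspace. */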
+SEC("kretprobe/bpf_map_lookup")
+int kretprobe_bpf_map_lookup(struct pt_regs *ctx) {
+    struct event_t *event = bpf_map_lookup_elem(&event_stash, &ZERO);
+    if (!event)
+        return BPF_OK;
+
+    struct print_bpfmap_value *map_value = bpf_map_lookup_elem(&print_bpfmap_map,
+                                                               &event->print_bpfmap_id);
+    if (!map_value)
+        return BPF_OK;
+
+    bpf_probe_read_kernel(&map_value->value,
+                          sizeof(map_value->value),
+                          (void *)PT_REGS_RC(ctx));
+
+    bpf_map_push_elem(&events, event, BPF_EXIST);
+    return BPF_OK;
+}
+
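+/* For updates, key and value are both available at entry (PARM2/PARM3),
+ * so a single kprobe captures and emits the whole event. */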
+SEC("kprobe/bpf_map_update")
+int kprobe_bpf_map_update(struct pt_regs *ctx) {
+    u64 stackid = get_stackid(ctx, true);
+
+    struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
+    if (skb && *skb) {
+        struct event_t event = {};
+
+        event.addr = PT_REGS_IP(ctx);
+        if (!handle_everything(*skb, ctx, &event, &stackid, true))
+            return BPF_OK;
+
+        if (CFG.output_caller)
+            bpf_probe_read_kernel(&event.caller_addr,
+                                  sizeof(event.caller_addr),
+                                  (void *)PT_REGS_SP(ctx));
+
+        /* static keeps this 284-byte packed struct off the 512-byte BPF stack. */
+        static struct print_bpfmap_value map_value = {};
+        set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &map_value);
+
+        bpf_probe_read_kernel(&map_value.value,
+                              sizeof(map_value.value),
+                              (void *)PT_REGS_PARM3(ctx));
+        bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &map_value, BPF_ANY);
+        bpf_map_push_elem(&events, &event, BPF_EXIST);
+    }
+
+    return BPF_OK;
+}
+
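+/* Deletes only carry a key; the value bytes of this handler's record are
+ * never written and stay zeroed. */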
+SEC("kprobe/bpf_map_delete")
+int kprobe_bpf_map_delete(struct pt_regs *ctx) {
+    u64 stackid = get_stackid(ctx, true);
+
+    struct sk_buff **skb = bpf_map_lookup_elem(&stackid_skb, &stackid);
+    if (skb && *skb) {
+        struct event_t event = {};
+
+        event.addr = PT_REGS_IP(ctx);
+        if (!handle_everything(*skb, ctx, &event, &stackid, true))
+            return BPF_OK;
+
+        if (CFG.output_caller)
+            bpf_probe_read_kernel(&event.caller_addr,
+                                  sizeof(event.caller_addr),
+                                  (void *)PT_REGS_SP(ctx));
+
+        static struct print_bpfmap_value map_value = {};
+        set_common_bpfmap_info(ctx, &event.print_bpfmap_id, &map_value);
+
+        bpf_map_update_elem(&print_bpfmap_map, &event.print_bpfmap_id, &map_value, BPF_ANY);
+        bpf_map_push_elem(&events, &event, BPF_EXIST);
+    }
+
+    return BPF_OK;
+}
+
 char __license[] SEC("license") = "Dual BSD/GPL";