/*
 * IPsec monitoring: kprobe/kretprobe eBPF hooks on the kernel xfrm input and
 * output paths.
 */

#ifndef __IPSEC_H__
#define __IPSEC_H__

#include "utils.h"

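/*
 * Update an existing entry in additional_flow_metrics with the result returned
 * by the xfrm function. Returns 0 when an entry for the flow id was found and
 * updated, -1 when no entry exists yet (the caller then creates one).
 */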
static inline int ipsec_lookup_and_update_flow(flow_id *id, int flow_encrypted_ret,
                                               u16 eth_protocol) {
    additional_metrics *extra_metrics = bpf_map_lookup_elem(&additional_flow_metrics, id);
    if (extra_metrics != NULL) {
        extra_metrics->end_mono_time_ts = bpf_ktime_get_ns();
        extra_metrics->eth_protocol = eth_protocol;
        extra_metrics->flow_encrypted_ret = flow_encrypted_ret;
        extra_metrics->flow_encrypted = flow_encrypted_ret == 0 ? true : false;
        return 0;
    }
    return -1;
}

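/*
 * Called from the xfrm kretprobes: retrieve the flow id stashed by the matching
 * kprobe (keyed by pid_tgid, per direction), record whether the xfrm function
 * returned success (a return value of 0 is treated as "encrypted"), and drop
 * the stashed entry.
 */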
static inline int update_flow_with_ipsec_return(int flow_encrypted_ret, direction dir) {
    u64 pid_tgid = bpf_get_current_pid_tgid();
    u16 eth_protocol = 0;
    flow_id *id = NULL;
    int ret = 0;

    if (dir == INGRESS) {
        id = bpf_map_lookup_elem(&ipsec_ingress_map, &pid_tgid);
    } else {
        id = bpf_map_lookup_elem(&ipsec_egress_map, &pid_tgid);
    }

    if (!id) {
        BPF_PRINTK("ipsec flow id not found in dir: %d", dir);
        return 0;
    }

    if (is_ipv4(id->src_ip)) {
        eth_protocol = ETH_P_IP;
    } else {
        eth_protocol = ETH_P_IPV6;
    }

    BPF_PRINTK("found encrypted flow dir: %d encrypted: %d\n", dir,
               flow_encrypted_ret == 0 ? true : false);

    // update the existing flow with the IPsec result
    ret = ipsec_lookup_and_update_flow(id, flow_encrypted_ret, eth_protocol);
    if (ret == 0) {
        goto end;
    }

    // no existing entry: create a new one carrying only the IPsec fields
    u64 current_time = bpf_ktime_get_ns();
    additional_metrics new_flow;
    __builtin_memset(&new_flow, 0, sizeof(new_flow));
    new_flow.start_mono_time_ts = current_time;
    new_flow.end_mono_time_ts = current_time;
    new_flow.eth_protocol = eth_protocol;
    new_flow.flow_encrypted_ret = flow_encrypted_ret;
    new_flow.flow_encrypted = flow_encrypted_ret == 0 ? true : false;
    ret = bpf_map_update_elem(&additional_flow_metrics, id, &new_flow, BPF_NOEXIST);
    if (ret != 0) {
        if (ret == -EEXIST) {
            // entry appeared in the meantime: fall back to updating it in place
            ret = ipsec_lookup_and_update_flow(id, flow_encrypted_ret, eth_protocol);
            if (ret != 0) {
                BPF_PRINTK("error ipsec updating an existing flow err: %d\n", ret);
            }
        } else {
            BPF_PRINTK("error ipsec creating flow err: %d\n", ret);
        }
    }
end:
    // the stashed per-thread entry is no longer needed once the flow is updated
    if (dir == INGRESS) {
        bpf_map_delete_elem(&ipsec_ingress_map, &pid_tgid);
    } else {
        bpf_map_delete_elem(&ipsec_egress_map, &pid_tgid);
    }
    return 0;
}

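/*
 * Called from the xfrm kprobes: parse L2/L3/L4 info from the sk_buff into a
 * flow id, apply optional flow filtering, and stash the id keyed by pid_tgid
 * so the corresponding kretprobe can attach the return value to the flow.
 */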
static inline int enter_xfrm_func(struct sk_buff *skb, direction dir) {
    u64 pid_tgid = bpf_get_current_pid_tgid();
    u16 family = 0, flags = 0, eth_protocol = 0;
    u8 dscp = 0, protocol = 0;
    flow_id id;
    int ret = 0;

    __builtin_memset(&id, 0, sizeof(id));

    u32 if_index = BPF_CORE_READ(skb, skb_iif);

    // read L2 info
    core_fill_in_l2(skb, &eth_protocol, &family);

    // read L3 info
    core_fill_in_l3(skb, &id, family, &protocol, &dscp);

    // read L4 info
    switch (protocol) {
    case IPPROTO_TCP:
        core_fill_in_tcp(skb, &id, &flags);
        break;
    case IPPROTO_UDP:
        core_fill_in_udp(skb, &id);
        break;
    case IPPROTO_SCTP:
        core_fill_in_sctp(skb, &id);
        break;
    case IPPROTO_ICMP:
        core_fill_in_icmpv4(skb, &id);
        break;
    case IPPROTO_ICMPV6:
        core_fill_in_icmpv6(skb, &id);
        break;
    default:
        fill_in_others_protocol(&id, protocol);
    }

    // check if this packet needs to be filtered out when the filtering feature is enabled
    bool skip = check_and_do_flow_filtering(&id, flags, 0, eth_protocol, NULL, dir);
    if (skip) {
        return 0;
    }

    BPF_PRINTK("Enter xfrm dir: %d protocol: %d family: %d if_index: %d\n", dir, protocol, family,
               if_index);

    // stash the flow id, keyed by pid_tgid, so the matching kretprobe can find it
    if (dir == INGRESS) {
        ret = bpf_map_update_elem(&ipsec_ingress_map, &pid_tgid, &id, BPF_NOEXIST);
    } else {
        ret = bpf_map_update_elem(&ipsec_egress_map, &pid_tgid, &id, BPF_NOEXIST);
    }
    if (ret != 0) {
        if (trace_messages && ret != -EEXIST) {
            BPF_PRINTK("error creating new ipsec map entry dir: %d err: %d\n", dir, ret);
        }
    }
    return 0;
}

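/*
 * Ingress hooks: the input path receives the sk_buff as its first argument,
 * hence PT_REGS_PARM1 in the kprobe below; the kretprobe picks up the return
 * code. Both are gated on sampling and the enable_ipsec flag.
 */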
SEC("kprobe/xfrm_input")
int BPF_KPROBE(xfrm_input_kprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM1(ctx);
    if (!skb) {
        return 0;
    }
    return enter_xfrm_func(skb, INGRESS);
}

SEC("kretprobe/xfrm_input")
int BPF_KRETPROBE(xfrm_input_kretprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    int xfrm_ret = PT_REGS_RC(ctx);
    return update_flow_with_ipsec_return(xfrm_ret, INGRESS);
}

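/*
 * Egress hooks: the output path receives the sk_buff as its second argument,
 * hence PT_REGS_PARM2 in the kprobe below; the kretprobe records the return code.
 */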
SEC("kprobe/xfrm_output")
int BPF_KPROBE(xfrm_output_kprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM2(ctx);
    if (!skb) {
        return 0;
    }
    return enter_xfrm_func(skb, EGRESS);
}

SEC("kretprobe/xfrm_output")
int BPF_KRETPROBE(xfrm_output_kretprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    int xfrm_ret = PT_REGS_RC(ctx);
    return update_flow_with_ipsec_return(xfrm_ret, EGRESS);
}

#endif /* __IPSEC_H__ */