/*
 * IPsec monitoring kprobe/kretprobe eBPF hooks.
 */

#ifndef __IPSEC_H__
#define __IPSEC_H__

#include "utils.h"

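/*
 * How these hooks fit together:
 *  - The kprobes on xfrm_input()/xfrm_output() parse the sk_buff into a
 *    flow_id and stash it in ipsec_ingress_map / ipsec_egress_map, keyed by
 *    the current pid_tgid.
 *  - The matching kretprobes look the flow_id back up by pid_tgid, use the
 *    xfrm return value to mark the flow as encrypted (return value 0) or to
 *    record the error code, and then delete the per-thread map entry.
 *
 * The types, maps and helpers used below (flow_id, additional_metrics,
 * additional_flow_metrics, ipsec_ingress_map, ipsec_egress_map, direction,
 * BPF_PRINTK, the core_fill_in_* helpers) are expected to come from
 * "utils.h" and the shared map definitions. As an illustration only, the
 * per-direction maps are assumed to be shaped roughly like:
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_HASH);
 *       __type(key, u64);        // pid_tgid of the task inside xfrm_*()
 *       __type(value, flow_id);  // flow parsed by the kprobe
 *   } ipsec_ingress_map SEC(".maps");
 */

/*
 * Update the additional_flow_metrics entry for an already-known flow.
 * Returns 0 when the entry exists and was updated, -1 when no entry was
 * found so the caller can create one. A non-zero flow_encrypted_ret stores
 * the xfrm error code and clears the flow_encrypted flag.
 */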
static inline int ipsec_lookup_and_update_flow(flow_id *id, int flow_encrypted_ret,
                                               u16 eth_protocol) {
    additional_metrics *extra_metrics = bpf_map_lookup_elem(&additional_flow_metrics, id);
    if (extra_metrics != NULL) {
        extra_metrics->end_mono_time_ts = bpf_ktime_get_ns();
        extra_metrics->eth_protocol = eth_protocol;
        if (flow_encrypted_ret != 0) {
            extra_metrics->flow_encrypted_ret = flow_encrypted_ret;
            if (extra_metrics->flow_encrypted) {
                extra_metrics->flow_encrypted = false;
            }
        }
        return 0;
    }
    return -1;
}

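/*
 * Called from the xfrm kretprobes. Retrieves the flow_id stashed by the
 * matching kprobe for this pid_tgid, updates (or creates) its
 * additional_flow_metrics entry with the xfrm return value, and always
 * removes the per-thread map entry before returning.
 */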
static inline int update_flow_with_ipsec_return(int flow_encrypted_ret, direction dir) {
    u64 pid_tgid = bpf_get_current_pid_tgid();
    u16 eth_protocol = 0;
    flow_id *id = NULL;
    int ret = 0;

    if (dir == INGRESS) {
        id = bpf_map_lookup_elem(&ipsec_ingress_map, &pid_tgid);
    } else {
        id = bpf_map_lookup_elem(&ipsec_egress_map, &pid_tgid);
    }

    if (!id) {
        BPF_PRINTK("ipsec flow id not found in dir: %d", dir);
        return 0;
    }

    if (is_ipv4(id->src_ip)) {
        eth_protocol = ETH_P_IP;
    } else {
        eth_protocol = ETH_P_IPV6;
    }

    BPF_PRINTK("found encrypted flow dir: %d encrypted: %d\n", dir,
               flow_encrypted_ret == 0 ? true : false);

    // update flow with ipsec info
    ret = ipsec_lookup_and_update_flow(id, flow_encrypted_ret, eth_protocol);
    if (ret == 0) {
        goto end;
    }

    u64 current_time = bpf_ktime_get_ns();
    additional_metrics new_flow;
    __builtin_memset(&new_flow, 0, sizeof(new_flow));
    new_flow.start_mono_time_ts = current_time;
    new_flow.end_mono_time_ts = current_time;
    new_flow.eth_protocol = eth_protocol;
    new_flow.flow_encrypted_ret = flow_encrypted_ret;
    new_flow.flow_encrypted = flow_encrypted_ret == 0 ? true : false;
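    // BPF_NOEXIST can still fail with -EEXIST if another CPU created the
    // entry between the lookup above and this update; in that case fall back
    // to updating the existing entry.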
67
+ ret = bpf_map_update_elem (& additional_flow_metrics , id , & new_flow , BPF_NOEXIST );
68
+ if (ret != 0 ) {
69
+ if (ret != - EEXIST ) {
70
+ BPF_PRINTK ("error ipsec creating flow err: %d\n" , ret );
71
+ }
72
+ if (ret == - EEXIST ) {
73
+ ret = ipsec_lookup_and_update_flow (id , flow_encrypted_ret , eth_protocol );
74
+ if (ret != 0 ) {
75
+ BPF_PRINTK ("error ipsec updating an existing flow err: %d\n" , ret );
76
+ }
77
+ }
78
+ }
79
+ end :
80
+ if (dir == INGRESS ) {
81
+ bpf_map_delete_elem (& ipsec_ingress_map , & pid_tgid );
82
+ } else {
83
+ bpf_map_delete_elem (& ipsec_egress_map , & pid_tgid );
84
+ }
85
+ return 0 ;
86
+ }
87
+
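/*
 * Shared kprobe body for xfrm_input()/xfrm_output(). Builds a flow_id from
 * the sk_buff L2-L4 headers (via the CO-RE read helpers), applies the
 * optional flow filtering, and stashes the id keyed by pid_tgid so the
 * corresponding kretprobe can attach the xfrm return value to the same flow.
 */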
static inline int enter_xfrm_func(struct sk_buff *skb, direction dir) {
    u64 pid_tgid = bpf_get_current_pid_tgid();
    u16 family = 0, flags = 0, eth_protocol = 0;
    u8 dscp = 0, protocol = 0;
    flow_id id;
    int ret = 0;

    __builtin_memset(&id, 0, sizeof(id));

    u32 if_index = BPF_CORE_READ(skb, skb_iif);

    // read L2 info
    core_fill_in_l2(skb, &eth_protocol, &family);

    // read L3 info
    core_fill_in_l3(skb, &id, family, &protocol, &dscp);

    // read L4 info
    switch (protocol) {
    case IPPROTO_TCP:
        core_fill_in_tcp(skb, &id, &flags);
        break;
    case IPPROTO_UDP:
        core_fill_in_udp(skb, &id);
        break;
    case IPPROTO_SCTP:
        core_fill_in_sctp(skb, &id);
        break;
    case IPPROTO_ICMP:
        core_fill_in_icmpv4(skb, &id);
        break;
    case IPPROTO_ICMPV6:
        core_fill_in_icmpv6(skb, &id);
        break;
    default:
        fill_in_others_protocol(&id, protocol);
    }

    // check if this packet needs to be filtered when the filtering feature is enabled
    bool skip = check_and_do_flow_filtering(&id, flags, 0, eth_protocol, NULL, dir);
    if (skip) {
        return 0;
    }

    BPF_PRINTK("Enter xfrm dir: %d protocol: %d family: %d if_index: %d \n", dir, protocol, family,
               if_index);

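    // Stash the parsed flow keyed by pid_tgid; the kretprobe running on the
    // same task will look it up and then delete it.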
    if (dir == INGRESS) {
        ret = bpf_map_update_elem(&ipsec_ingress_map, &pid_tgid, &id, BPF_NOEXIST);
    } else {
        ret = bpf_map_update_elem(&ipsec_egress_map, &pid_tgid, &id, BPF_NOEXIST);
    }
    if (ret != 0) {
        if (trace_messages) {
            BPF_PRINTK("error creating new ipsec map dir: %d err: %d\n", dir, ret);
        }
    }
    return 0;
}

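/*
 * xfrm_input() handles the IPsec receive/decapsulation path, so it is
 * recorded as INGRESS. The sk_buff is assumed to be the first argument of
 * xfrm_input() in current kernels, hence PT_REGS_PARM1 below.
 */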
SEC("kprobe/xfrm_input")
int BPF_KPROBE(xfrm_input_kprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM1(ctx);
    if (!skb) {
        return 0;
    }
    return enter_xfrm_func(skb, INGRESS);
}

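/*
 * The kretprobe fires after xfrm_input() returns; PT_REGS_RC holds its
 * return value, where 0 means the packet was processed successfully and the
 * flow is marked as encrypted.
 */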
SEC("kretprobe/xfrm_input")
int BPF_KRETPROBE(xfrm_input_kretprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    int xfrm_ret = PT_REGS_RC(ctx);
    return update_flow_with_ipsec_return(xfrm_ret, INGRESS);
}

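/*
 * xfrm_output() covers the transmit/encapsulation path (EGRESS). Its sk_buff
 * is assumed to be the second argument (after the socket) in current
 * kernels, hence PT_REGS_PARM2 below.
 */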
SEC("kprobe/xfrm_output")
int BPF_KPROBE(xfrm_output_kprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM2(ctx);
    if (!skb) {
        return 0;
    }
    return enter_xfrm_func(skb, EGRESS);
}

SEC("kretprobe/xfrm_output")
int BPF_KRETPROBE(xfrm_output_kretprobe) {
    if (do_sampling == 0 || enable_ipsec == 0) {
        return 0;
    }
    int xfrm_ret = PT_REGS_RC(ctx);
    return update_flow_with_ipsec_return(xfrm_ret, EGRESS);
}

#endif /* __IPSEC_H__ */