11
11
#include <stddef.h>
12
12
#include <stdint.h>
13
13
#include <string.h>
14
+ #ifndef __STDC_NO_THREADS__
15
+ #include <threads.h>
16
+ #endif
14
17
15
18
#include "upb/port/sanitizers.h"
16
19
22
25
// NOTE(review): presumably an upper bound on sizeof(upb_Arena) in words —
// must stay >= the real struct size and be bumped whenever a member is added
// (the atomic `tid` member bumped it from 9 to 10).  When __STDC_NO_THREADS__
// is defined the struct has no `tid`, so 10 over-reserves slightly — confirm
// that is intentional.
//
// We need this because the decoder inlines a upb_Arena for performance but
// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
#define UPB_ARENA_SIZE_HACK (10 + (UPB_XSAN_STRUCT_SIZE * 2))
26
29
27
30
// LINT.IfChange(upb_Arena)

// Public-ish view of the arena; the full definition lives in arena.c.  Any
// member change must be reflected in UPB_ARENA_SIZE_HACK above.
struct upb_Arena {
  // Bump pointer for the current block: upb_Arena_Malloc() hands out memory
  // by advancing `ptr`.  `end` presumably bounds the block — confirm against
  // _upb_ArenaHas() in arena.c.
  char* UPB_ONLYBITS(ptr);
  char* UPB_ONLYBITS(end);
#ifndef __STDC_NO_THREADS__
  // ID of the thread currently inside an arena malloc/realloc, or 0 when no
  // thread is inside.  enterTid()/exitTid() exchange this atomically and trap
  // if a second thread uses the arena concurrently.
  UPB_ATOMIC(thrd_t) tid;
#endif
  UPB_XSAN_MEMBER
};
34
40
@@ -38,6 +44,32 @@ struct upb_Arena {
38
44
extern "C" {
39
45
#endif
40
46
47
+ // enterTid() and exitTid() are used to compare the current thread ID to the
48
+ // thread ID stored in the arena during allocations. If they are not the same
49
+ // during arena malloc/realloc, the arena is being used by another thread and
50
+ // should crash.
51
+ #ifndef __STDC_NO_THREADS__
52
+ static thrd_t enterTid (struct upb_Arena * a ) {
53
+ thrd_t t = thrd_current ();
54
+ thrd_t old = upb_Atomic_Exchange (& a -> tid , t , memory_order_relaxed );
55
+ if (old != t && old != 0 ) {
56
+ __builtin_trap ();
57
+ }
58
+ return old ;
59
+ }
60
+
61
+ static void exitTid (struct upb_Arena * a , uintptr_t tid ) {
62
+ thrd_t old = upb_Atomic_Exchange (& a -> tid , tid , memory_order_relaxed );
63
+ if (old != thrd_current ()) {
64
+ __builtin_trap ();
65
+ }
66
+ }
67
+ #else
68
+ // No-op for non-threaded builds.
69
+ static uintptr_t enterTid (struct upb_Arena * a ) { return 0 ; }
70
+ static void exitTid (struct upb_Arena * a , uintptr_t tid ) {}
71
+ #endif
72
+
41
73
void UPB_PRIVATE (_upb_Arena_SwapIn )(struct upb_Arena * des ,
42
74
const struct upb_Arena * src );
43
75
void UPB_PRIVATE (_upb_Arena_SwapOut )(struct upb_Arena * des ,
@@ -63,21 +95,24 @@ UPB_INLINE bool UPB_PRIVATE(_upb_Arena_IsAligned)(const void* ptr) {
63
95
}
64
96
65
97
// Allocates `size` bytes from the arena.  Wrapped in enterTid()/exitTid() so
// that concurrent use of one arena from two threads traps instead of
// corrupting the bump pointer.
UPB_API_INLINE void* upb_Arena_Malloc(struct upb_Arena* a, size_t size) {
  uintptr_t tid = enterTid(a);  // trap if another thread owns the arena
  UPB_PRIVATE(upb_Xsan_AccessReadWrite)(UPB_XSAN(a));

  // Round `size` up to the arena's allocation granularity.
  size_t span = UPB_PRIVATE(_upb_Arena_AllocSpan)(size);

  if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
    // Not enough room in the current block: take the out-of-line slow path.
    // (Local prototype keeps the slow path out of this header's includes.)
    void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(struct upb_Arena * a, size_t size);
    void* ret = UPB_PRIVATE(_upb_Arena_SlowMalloc)(a, span);
    exitTid(a, tid);  // must balance enterTid() on every return path
    return ret;
  }

  // We have enough space to do a fast malloc.
  void* ret = a->UPB_ONLYBITS(ptr);
  a->UPB_ONLYBITS(ptr) += span;
  UPB_ASSERT(UPB_PRIVATE(_upb_Arena_IsAligned)(ret));
  UPB_ASSERT(UPB_PRIVATE(_upb_Arena_IsAligned)(a->UPB_ONLYBITS(ptr)));
  exitTid(a, tid);
  // Unpoison only the `size` bytes actually requested (not the full span).
  return UPB_PRIVATE(upb_Xsan_NewUnpoisonedRegion)(UPB_XSAN(a), ret, size);
}
83
118
@@ -123,6 +158,7 @@ UPB_API_INLINE bool upb_Arena_TryExtend(struct upb_Arena* a, void* ptr,
123
158
124
159
UPB_API_INLINE void * upb_Arena_Realloc (struct upb_Arena * a , void * ptr ,
125
160
size_t oldsize , size_t size ) {
161
+ uintptr_t tid = enterTid (a );
126
162
UPB_PRIVATE (upb_Xsan_AccessReadWrite )(UPB_XSAN (a ));
127
163
128
164
void * ret ;
@@ -145,7 +181,10 @@ UPB_API_INLINE void* upb_Arena_Realloc(struct upb_Arena* a, void* ptr,
145
181
// We want to invalidate pointers to the old region if hwasan is enabled, so
146
182
// we poison and unpoison even if ptr == ret.
147
183
UPB_PRIVATE (upb_Xsan_PoisonRegion )(ptr , oldsize );
148
- return UPB_PRIVATE (upb_Xsan_NewUnpoisonedRegion )(UPB_XSAN (a ), ret , size );
184
+ void * ret_final =
185
+ UPB_PRIVATE (upb_Xsan_NewUnpoisonedRegion )(UPB_XSAN (a ), ret , size );
186
+ exitTid (a , tid );
187
+ return ret_final ;
149
188
}
150
189
151
190
#ifdef __cplusplus
0 commit comments