@@ -320,7 +320,7 @@ static uint32_t vmem_id;
 static uint32_t vmem_populators;
 static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
 static vmem_seg_t *vmem_segfree;
-static kmutex_t vmem_list_lock;
+kmutex_t vmem_list_lock;
 static kmutex_t vmem_segfree_lock;
 static kmutex_t vmem_sleep_lock;
 static kmutex_t vmem_nosleep_lock;
@@ -342,9 +342,10 @@ vmem_t *spl_heap_arena;
 static void *spl_heap_arena_initial_alloc;
 static uint32_t spl_heap_arena_initial_alloc_size = 0;
 #define	NUMBER_OF_ARENAS_IN_VMEM_INIT 21
-// static struct timespec vmem_update_interval = {15, 0}; /* vmem_update() every 15 seconds */
+static struct timespec vmem_update_interval = {60, 0}; /* vmem_update() every 60 seconds */
 uint32_t vmem_mtbf;	/* mean time between failures [default: off] */
 uint32_t vmem_seg_size = sizeof (vmem_seg_t);
+volatile boolean_t hash_rescale_exit = FALSE;
 
 // must match with include/sys/vmem_impl.h
 static vmem_kstat_t vmem_kstat_template = {
@@ -2127,9 +2128,12 @@ vmem_destroy(vmem_t *vmp)
 	    vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
 	    "identifiers" : "bytes");
 
-	if (vmp->vm_hash_table != vmp->vm_hash0)
-		vmem_free(vmem_hash_arena, vmp->vm_hash_table,
-		    (vmp->vm_hash_mask + 1) * sizeof (void *));
+	if (vmp->vm_hash_table != vmp->vm_hash0) {
+		if (vmem_hash_arena != NULL) {
+			vmem_free(vmem_hash_arena, vmp->vm_hash_table,
+			    (vmp->vm_hash_mask + 1) * sizeof (void *));
+		}
+	}
 
 	/*
 	 * Give back the segment structures for anything that's left in the
@@ -2176,10 +2180,13 @@ vmem_destroy_internal(vmem_t *vmp)
 	    vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
 	    "identifiers" : "bytes");
 
-	if (vmp->vm_hash_table != vmp->vm_hash0)
-		if (vmem_hash_arena != NULL)
-			vmem_free(vmem_hash_arena, vmp->vm_hash_table,
-			    (vmp->vm_hash_mask + 1) * sizeof (void *));
+	if (vmp->vm_hash_table != vmp->vm_hash0) {
+		if (vmem_hash_arena != NULL) {
+			vmem_free(vmem_hash_arena, vmp->vm_hash_table,
+			    (vmp->vm_hash_mask + 1) * sizeof (void *));
+		}
+	}
+
 
 	/*
 	 * Give back the segment structures for anything that's left in the
@@ -2275,9 +2282,16 @@ void
 vmem_update(void *dummy)
 {
 	vmem_t *vmp;
+	static struct bsd_timeout_wrapper vmem_update_tm;
+
+	if (hash_rescale_exit == TRUE)
+		return;
 
 	mutex_enter(&vmem_list_lock);
 	for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
+		if (hash_rescale_exit == TRUE) {
+			break;
+		}
 		/*
 		 * If threads are waiting for resources, wake them up
 		 * periodically so they can issue another kmem_reap()
@@ -2292,7 +2306,9 @@ vmem_update(void *dummy)
 	}
 	mutex_exit(&vmem_list_lock);
 
-	// (void) bsd_timeout(vmem_update, dummy, &vmem_update_interval);
+
+	if (hash_rescale_exit == FALSE)
+		(void) bsd_timeout(vmem_update, &vmem_update_tm, &vmem_update_interval);
 }
 
 void
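
The pattern introduced above is a self-rearming one-shot timer: vmem_update() schedules its own next run via bsd_timeout(), and the new volatile hash_rescale_exit flag is checked on entry, inside the list walk, and before re-arming, so raising the flag lets the callback chain wind itself down at teardown. The commit also passes &vmem_update_tm (a static struct bsd_timeout_wrapper) rather than dummy as the timeout argument, which suggests this SPL's bsd_timeout() tracks the pending callout through that wrapper. Below is a minimal user-space model of the stop-flag handshake, not the kernel code itself: rescale_exit, arm_timer(), and timer_cb() are hypothetical stand-ins, detached pthreads stand in for bsd_timeout(), and a plain volatile flag mirrors the commit's approach even though C11 atomics would be the stricter choice.

/* model.c -- user-space sketch of the rearm-with-exit-flag pattern.
 * Build: cc model.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile bool rescale_exit = false;	/* plays hash_rescale_exit */

static void timer_cb(void);

/* Stand-in for bsd_timeout(): run timer_cb() once after one interval. */
static void *
timer_thread(void *arg)
{
	(void) arg;
	sleep(1);
	timer_cb();
	return (NULL);
}

static void
arm_timer(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, timer_thread, NULL) == 0)
		pthread_detach(t);
}

static void
timer_cb(void)
{
	if (rescale_exit)	/* teardown started: do no work at all */
		return;

	printf("periodic work\n");	/* stands in for the vmem_list walk */

	if (!rescale_exit)	/* re-arm only while still running */
		arm_timer();
}

int
main(void)
{
	arm_timer();
	sleep(3);		/* let a few periods elapse */
	rescale_exit = true;	/* ask the chain to stop */
	sleep(2);		/* grace period for a pending callback */
	return (0);
}

Under this scheme the teardown path never has to cancel the callout explicitly: it sets the flag and waits out at most one interval, after which no callback will re-arm itself.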