/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * arcus-memcached - Arcus memory cache server
 * Copyright 2017 JaM2in Co., Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
//#include <pthread.h>
#include <assert.h>
#include "mblock_allocator.h"

/* Free block pool: a singly linked list tracked by head and tail pointers. */
static mem_block_t *pool_head = NULL;
static mem_block_t *pool_tail = NULL;
static uint32_t initial_mblocks; /* number of blocks created at init time */
static uint32_t total_mblocks;   /* all live blocks: pooled plus handed out */
static uint32_t free_mblocks;    /* blocks currently sitting in the pool */

//static pthread_mutex_t pool_mutex;

/* Release every block remaining in the pool. */
static void do_mblock_allocator_free_all(void) {
    mem_block_t *helper;
    while (pool_head != NULL) {
        helper = pool_head;
        pool_head = pool_head->next;
        free(helper);
    }
    pool_tail = NULL;
}

int mblock_allocator_init(size_t nblocks) {
    mem_block_t *helper = NULL;
    size_t i;
    size_t nblk;

    if (nblocks == 0) nblk = BLOCK_ALLOCATOR_DEFAULT_SIZE;
    else if (nblocks > BLOCK_ALLOCATOR_MAXINIT_SIZE) nblk = BLOCK_ALLOCATOR_MAXINIT_SIZE;
    else nblk = nblocks;

    for (i = 0; i < nblk; i++) {
        helper = (mem_block_t *)malloc(sizeof(mem_block_t));
        if (helper == NULL) break;

        helper->next = NULL;
        if (pool_tail) pool_tail->next = helper;
        else pool_head = helper;
        pool_tail = helper;
    }
    if (i < nblk) { /* incomplete state: roll back what was built */
        do_mblock_allocator_free_all();
        return -1;
    }

    //pthread_mutex_init(&pool_mutex, NULL);
    initial_mblocks = nblk;
    total_mblocks = nblk;
    free_mblocks = nblk;

    return 0;
}

void mblock_allocator_destroy(void) {
    //pthread_mutex_lock(&pool_mutex);
    do_mblock_allocator_free_all();

    initial_mblocks = 0;
    total_mblocks = 0;
    free_mblocks = 0;
    //pthread_mutex_unlock(&pool_mutex);
    //pthread_mutex_destroy(&pool_mutex);
}
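
/* Example usage (illustrative sketch, not part of the module): a server
 * would typically initialize the pool once at startup and destroy it at
 * shutdown. Passing 0 falls back to BLOCK_ALLOCATOR_DEFAULT_SIZE.
 *
 *   if (mblock_allocator_init(0) != 0) {
 *       // pool could not be fully populated; abort startup
 *   }
 *   ...
 *   mblock_allocator_destroy();
 */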

void mblock_allocator_stats(mblock_stats *blk_stat) {
    //pthread_mutex_lock(&pool_mutex);
    blk_stat->total_mblocks = total_mblocks;
    blk_stat->free_mblocks = free_mblocks;
    //pthread_mutex_unlock(&pool_mutex);
}
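
/* Example (illustrative sketch): take a point-in-time snapshot of the
 * pool counters, e.g. for a stats command.
 *
 *   mblock_stats st;
 *   mblock_allocator_stats(&st);
 *   // st.total_mblocks and st.free_mblocks now hold the snapshot
 */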

/* A single-block allocator; kept for reference but not currently used.
void *allocate_single_block() {
    mem_block_t *ret;

    //pthread_mutex_lock(&pool_mutex);

    if ((ret = pool_head) != NULL) {
        pool_head = pool_head->next;
        if (pool_head == NULL)
            pool_tail = NULL;
        ret->next = NULL;
        free_mblocks--;
    } else {
        // TODO :
        // This malloc() inside the mutex may raise a performance issue.
        // Is there a way to execute the malloc and counter adjustment
        // outside the mutex lock?
        ret = (mem_block_t *)malloc(sizeof(mem_block_t));
        if (ret != NULL) {
            total_mblocks++;
            ret->next = NULL;
        }
    }

    //pthread_mutex_unlock(&pool_mutex);

    return (void *)ret;
}
*/

/* Allocate a linked list of blck_cnt blocks, taking from the pool first
 * and falling back to malloc() for the remainder. On success, *head_blk
 * and *tail_blk delimit the list. Returns false if any malloc() fails,
 * in which case every block obtained so far is returned to the pool. */
bool mblock_list_alloc(uint32_t blck_cnt, mem_block_t **head_blk, mem_block_t **tail_blk) {
    assert(blck_cnt > 0);
    uint32_t alloc_cnt = 0;
    *head_blk = *tail_blk = NULL;

    //pthread_mutex_lock(&pool_mutex);
    if (free_mblocks > 0) {
        if (blck_cnt >= free_mblocks) {
            /* take the entire pool */
            *head_blk = pool_head;
            *tail_blk = pool_tail;
            alloc_cnt = free_mblocks;

            pool_head = pool_tail = NULL;
            free_mblocks = 0;
        } else { /* free_mblocks > blck_cnt: take the first blck_cnt blocks */
            *head_blk = pool_head;
            alloc_cnt = 1;
            while (alloc_cnt < blck_cnt) {
                pool_head = pool_head->next;
                alloc_cnt++;
            }
            *tail_blk = pool_head;

            pool_head = pool_head->next;
            free_mblocks -= alloc_cnt;

            (*tail_blk)->next = NULL;
        }
    }
    //pthread_mutex_unlock(&pool_mutex);

    if (alloc_cnt < blck_cnt) {
        // TODO :
        // This malloc() inside the mutex may raise a performance issue.
        // Is there a way to execute the malloc and counter adjustment
        // outside the mutex lock?
        mem_block_t *new_blk = NULL;
        uint32_t new_cnt = 0;
        while (alloc_cnt < blck_cnt) {
            if ((new_blk = (mem_block_t *)malloc(sizeof(mem_block_t))) == NULL) break;
            new_blk->next = NULL;

            if (*head_blk) (*tail_blk)->next = new_blk;
            else *head_blk = new_blk;
            *tail_blk = new_blk;

            new_cnt++;
            alloc_cnt++;
        }
        //pthread_mutex_lock(&pool_mutex);
        total_mblocks += new_cnt;
        //pthread_mutex_unlock(&pool_mutex);
        if (alloc_cnt < blck_cnt) {
            /* give back everything obtained so far */
            mblock_list_free(alloc_cnt, *head_blk, *tail_blk);
            *head_blk = *tail_blk = NULL;
            return false;
        }
    }

    return true;
}

/* Return a list of blck_cnt blocks to the pool by appending it at the tail. */
void mblock_list_free(uint32_t blck_cnt, mem_block_t *head_blk, mem_block_t *tail_blk) {
    //mem_block_t *bye = NULL;
    //mem_block_t *bye_helper = NULL;

    if (head_blk == NULL || blck_cnt == 0)
        return;

    //pthread_mutex_lock(&pool_mutex);
    assert(pool_tail == NULL || pool_tail->next == NULL);
    assert(tail_blk->next == NULL);

    if (pool_head == NULL) {
        pool_head = head_blk;
    } else {
        pool_tail->next = head_blk;
    }
    pool_tail = tail_blk;

    free_mblocks += blck_cnt;
    assert(free_mblocks <= total_mblocks);

    // TODO : implement intelligent resize logic
    /*
    if (total_mblocks > initial_mblocks
        && free_mblocks > initial_mblocks / 2) {
        bye = pool_head;
        while (total_mblocks > initial_mblocks) {
            bye_helper = pool_head;
            pool_head = pool_head->next;
            free_mblocks--;
            total_mblocks--;
        }
        bye_helper->next = NULL;
    }*/

    //pthread_mutex_unlock(&pool_mutex);

    // rest of resize logic
    /*while (bye != NULL) {
        bye_helper = bye;
        bye = bye->next;
        free(bye_helper);
    }*/
}
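
/* Example (illustrative sketch): allocate a list of blocks, use it, and
 * return it to the pool. The caller is responsible for pairing the two
 * calls with the same block count.
 *
 *   mem_block_t *head, *tail;
 *   if (mblock_list_alloc(3, &head, &tail)) {
 *       // ... fill the blocks ...
 *       mblock_list_free(3, head, tail);
 *   }
 */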

/* Allocate enough blocks to hold elem_count elements in the result. */
bool eblk_prepare(eblock_result_t *result, uint32_t elem_count) {
    assert(elem_count > 0);
    uint32_t blkcnt = ((elem_count - 1) / EITEMS_PER_BLOCK) + 1;
    if (!mblock_list_alloc(blkcnt, &result->head_blk, &result->last_blk)) {
        result->elem_cnt = 0;
        return false;
    }
    result->tail_blk = NULL;
    result->blck_cnt = blkcnt;
    return true;
}

/* Trim the result's block list down to the blocks actually used. */
void eblk_truncate(eblock_result_t *result) {
    assert(result->last_blk->next == NULL);
    if (result->tail_blk != NULL) {
        if (result->tail_blk != result->last_blk) {
            /* free the unused blocks that follow the tail block */
            mem_block_t *free_head = result->tail_blk->next;
            mem_block_t *free_tail = result->last_blk;
            uint32_t used_nblks = ((result->elem_cnt - 1) / EITEMS_PER_BLOCK) + 1;
            uint32_t free_nblks = result->blck_cnt - used_nblks;

            mblock_list_free(free_nblks, free_head, free_tail);
            result->tail_blk->next = NULL;
            result->last_blk = result->tail_blk;
            result->blck_cnt -= free_nblks;
        }
    } else { /* no element was added (ENGINE_ELEM_ENOENT case): return the whole list */
        mblock_list_free(result->blck_cnt, result->head_blk, result->last_blk);
        result->head_blk = result->tail_blk = result->last_blk = NULL;
        result->elem_cnt = result->blck_cnt = 0;
    }
}

/* Append one element to the result, advancing to the next block when the
 * current tail block is full. */
void eblk_add_elem(eblock_result_t *result, eitem *elem) {
    if (result->tail_blk == NULL) { /* first element */
        result->tail_blk = result->head_blk;
        result->elem_cnt = 0;
    } else {
        assert(result->elem_cnt > 0);
        if (result->elem_cnt % EITEMS_PER_BLOCK == 0)
            result->tail_blk = result->tail_blk->next;
    }

    result->tail_blk->items[result->elem_cnt++ % EITEMS_PER_BLOCK] = elem;
}
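
/* Example (illustrative sketch): gather engine elements into a block
 * list. The eitem pointers and count come from the caller; the names
 * elems and elem_count here are hypothetical.
 *
 *   eblock_result_t result;
 *   if (eblk_prepare(&result, elem_count)) {
 *       for (uint32_t i = 0; i < elem_count; i++)
 *           eblk_add_elem(&result, elems[i]);
 *       eblk_truncate(&result);  // release any over-allocated blocks
 *   }
 */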