From d886bfff9bd93233ea0dde7bbdbc0c832562ca36 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Sat, 27 Jan 2024 00:35:52 +0000 Subject: [PATCH 01/11] Include llama2.c as a submodule and just add header file to example instead of .c file --- .gitmodules | 3 + cpp/third-party/llama2.c | 1 + examples/cpp/babyllama/CMakeLists.txt | 7 +- .../cpp/babyllama/src/baby_llama_handler.cc | 4 +- examples/cpp/babyllama/src/llama2.c/llama2.h | 113 +++ examples/cpp/babyllama/src/llama2.c/run.c | 863 ------------------ 6 files changed, 124 insertions(+), 867 deletions(-) create mode 160000 cpp/third-party/llama2.c create mode 100644 examples/cpp/babyllama/src/llama2.c/llama2.h delete mode 100644 examples/cpp/babyllama/src/llama2.c/run.c diff --git a/.gitmodules b/.gitmodules index 5ff9ad429d..a2584b165a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "cpp/third-party/llama.cpp"] path = cpp/third-party/llama.cpp url = https://github.com/ggerganov/llama.cpp.git +[submodule "cpp/third-party/llama2.c"] + path = cpp/third-party/llama2.c + url = https://github.com/karpathy/llama2.c diff --git a/cpp/third-party/llama2.c b/cpp/third-party/llama2.c new file mode 160000 index 0000000000..d9862069e7 --- /dev/null +++ b/cpp/third-party/llama2.c @@ -0,0 +1 @@ +Subproject commit d9862069e7ef665fe6309e3c17398ded2f121bf5 diff --git a/examples/cpp/babyllama/CMakeLists.txt b/examples/cpp/babyllama/CMakeLists.txt index 4da9bbf60d..696fa5f14a 100644 --- a/examples/cpp/babyllama/CMakeLists.txt +++ b/examples/cpp/babyllama/CMakeLists.txt @@ -1,5 +1,6 @@ -add_library(babyllama_handler SHARED src/baby_llama_handler.cc) +add_library(llama2_c STATIC ../../../cpp/third-party/llama2.c/run.c) +target_compile_options(llama2_c PRIVATE -Wall -Wextra -Ofast -fPIC) -target_link_libraries(babyllama_handler PRIVATE ts_backends_core ts_utils ${TORCH_LIBRARIES}) -target_compile_options(babyllama_handler PRIVATE -Wall -Wextra -Ofast) +add_library(babyllama_handler SHARED src/baby_llama_handler.cc) +target_link_libraries(babyllama_handler PRIVATE llama2_c ts_backends_core ts_utils ${TORCH_LIBRARIES}) diff --git a/examples/cpp/babyllama/src/baby_llama_handler.cc b/examples/cpp/babyllama/src/baby_llama_handler.cc index 0d3b2b5491..df62dc864e 100644 --- a/examples/cpp/babyllama/src/baby_llama_handler.cc +++ b/examples/cpp/babyllama/src/baby_llama_handler.cc @@ -5,7 +5,9 @@ #include -#include "llama2.c/run.c" +extern "C" { + #include "llama2.c/llama2.h" +} namespace llm { diff --git a/examples/cpp/babyllama/src/llama2.c/llama2.h b/examples/cpp/babyllama/src/llama2.c/llama2.h new file mode 100644 index 0000000000..2a267aa7fd --- /dev/null +++ b/examples/cpp/babyllama/src/llama2.c/llama2.h @@ -0,0 +1,113 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +// ---------------------------------------------------------------------------- +// Transformer model + +typedef struct { + int dim; // transformer dimension + int hidden_dim; // for ffn layers + int n_layers; // number of layers + int n_heads; // number of query heads + int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery) + int vocab_size; // vocabulary size, usually 256 (byte-level) + int seq_len; // max sequence length +} Config; + +typedef struct { + // token embedding table + float* token_embedding_table; // (vocab_size, dim) + // weights for rmsnorms + float* rms_att_weight; // (layer, dim) rmsnorm weights + float* rms_ffn_weight; // (layer, dim) + // 
weights for matmuls. note dim == n_heads * head_size + float* wq; // (layer, dim, n_heads * head_size) + float* wk; // (layer, dim, n_kv_heads * head_size) + float* wv; // (layer, dim, n_kv_heads * head_size) + float* wo; // (layer, n_heads * head_size, dim) + // weights for ffn + float* w1; // (layer, hidden_dim, dim) + float* w2; // (layer, dim, hidden_dim) + float* w3; // (layer, hidden_dim, dim) + // final rmsnorm + float* rms_final_weight; // (dim,) + // (optional) classifier weights for the logits, on the last layer + float* wcls; +} TransformerWeights; + +typedef struct { + // current wave of activations + float *x; // activation at current time stamp (dim,) + float *xb; // same, but inside a residual branch (dim,) + float *xb2; // an additional buffer just for convenience (dim,) + float *hb; // buffer for hidden dimension in the ffn (hidden_dim,) + float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,) + float *q; // query (dim,) + float *k; // key (dim,) + float *v; // value (dim,) + float *att; // buffer for scores/attention values (n_heads, seq_len) + float *logits; // output logits + // kv cache + float* key_cache; // (layer, seq_len, dim) + float* value_cache; // (layer, seq_len, dim) +} RunState; + +typedef struct { + Config config; // the hyperparameters of the architecture (the blueprint) + TransformerWeights weights; // the weights of the model + RunState state; // buffers for the "wave" of activations in the forward pass + // some more state needed to properly clean up the memory mapping (sigh) + int fd; // file descriptor for memory mapping + float* data; // memory mapped data pointer + ssize_t file_size; // size of the checkpoint file in bytes +} Transformer; +// ---------------------------------------------------------------------------- +// The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens + +typedef struct { + char *str; + int id; +} TokenIndex; + +typedef struct { + char** vocab; + float* vocab_scores; + TokenIndex *sorted_vocab; + int vocab_size; + unsigned int max_token_length; + unsigned char byte_pieces[512]; // stores all single-byte strings +} Tokenizer; + +// ---------------------------------------------------------------------------- +// The Sampler, which takes logits and returns a sampled token +// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling + +typedef struct { + float prob; + int index; +} ProbIndex; // struct used when sorting probabilities during top-p sampling + +typedef struct { + int vocab_size; + ProbIndex* probindex; // buffer used in top-p sampling + float temperature; + float topp; + unsigned long long rng_state; +} Sampler; +void build_transformer(Transformer *t, char* checkpoint_path); +void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size); +void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed); +void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens); +float* forward(Transformer* transformer, int token, int pos); +int sample(Sampler* sampler, float* logits); +long time_in_ms(); +char* decode(Tokenizer* t, int prev_token, int token); +void free_sampler(Sampler* sampler); +void free_tokenizer(Tokenizer* t); +void free_transformer(Transformer* t); diff --git a/examples/cpp/babyllama/src/llama2.c/run.c b/examples/cpp/babyllama/src/llama2.c/run.c deleted file mode 100644 index cacd1414af..0000000000 --- a/examples/cpp/babyllama/src/llama2.c/run.c +++ /dev/null @@ -1,863 
+0,0 @@ -/* Inference for Llama-2 Transformer model in pure C */ - -#include -#include -#include -#include -#include -#include -#include -#if defined _WIN32 - #include "win.h" -#else - #include - #include -#endif -// ---------------------------------------------------------------------------- -// Transformer model - -typedef struct { - int dim; // transformer dimension - int hidden_dim; // for ffn layers - int n_layers; // number of layers - int n_heads; // number of query heads - int n_kv_heads; // number of key/value heads (can be < query heads because of multiquery) - int vocab_size; // vocabulary size, usually 256 (byte-level) - int seq_len; // max sequence length -} Config; - -typedef struct { - // token embedding table - float* token_embedding_table; // (vocab_size, dim) - // weights for rmsnorms - float* rms_att_weight; // (layer, dim) rmsnorm weights - float* rms_ffn_weight; // (layer, dim) - // weights for matmuls. note dim == n_heads * head_size - float* wq; // (layer, dim, n_heads * head_size) - float* wk; // (layer, dim, n_kv_heads * head_size) - float* wv; // (layer, dim, n_kv_heads * head_size) - float* wo; // (layer, n_heads * head_size, dim) - // weights for ffn - float* w1; // (layer, hidden_dim, dim) - float* w2; // (layer, dim, hidden_dim) - float* w3; // (layer, hidden_dim, dim) - // final rmsnorm - float* rms_final_weight; // (dim,) - // (optional) classifier weights for the logits, on the last layer - float* wcls; -} TransformerWeights; - -typedef struct { - // current wave of activations - float *x; // activation at current time stamp (dim,) - float *xb; // same, but inside a residual branch (dim,) - float *xb2; // an additional buffer just for convenience (dim,) - float *hb; // buffer for hidden dimension in the ffn (hidden_dim,) - float *hb2; // buffer for hidden dimension in the ffn (hidden_dim,) - float *q; // query (dim,) - float *k; // key (dim,) - float *v; // value (dim,) - float *att; // buffer for scores/attention values (n_heads, seq_len) - float *logits; // output logits - // kv cache - float* key_cache; // (layer, seq_len, dim) - float* value_cache; // (layer, seq_len, dim) -} RunState; - -typedef struct { - Config config; // the hyperparameters of the architecture (the blueprint) - TransformerWeights weights; // the weights of the model - RunState state; // buffers for the "wave" of activations in the forward pass - // some more state needed to properly clean up the memory mapping (sigh) - int fd; // file descriptor for memory mapping - float* data; // memory mapped data pointer - ssize_t file_size; // size of the checkpoint file in bytes -} Transformer; - -void malloc_run_state(RunState* s, Config* p) { - // we calloc instead of malloc to keep valgrind happy - int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads; - s->x = (float*)calloc(p->dim, sizeof(float)); - s->xb = (float*)calloc(p->dim, sizeof(float)); - s->xb2 = (float*)calloc(p->dim, sizeof(float)); - s->hb = (float*)calloc(p->hidden_dim, sizeof(float)); - s->hb2 = (float*)calloc(p->hidden_dim, sizeof(float)); - s->q = (float*)calloc(p->dim, sizeof(float)); - s->k = (float*)calloc(kv_dim, sizeof(float)); - s->v = (float*)calloc(kv_dim, sizeof(float)); - s->att = (float*)calloc(p->n_heads * p->seq_len, sizeof(float)); - s->logits = (float*)calloc(p->vocab_size, sizeof(float)); - s->key_cache = (float*)calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float)); - s->value_cache = (float*)calloc(p->n_layers * p->seq_len * kv_dim, sizeof(float)); - // ensure all mallocs went fine - if (!s->x || !s->xb 
|| !s->xb2 || !s->hb || !s->hb2 || !s->q - || !s->k || !s->v || !s->att || !s->logits || !s->key_cache - || !s->value_cache) { - fprintf(stderr, "malloc failed!\n"); - exit(EXIT_FAILURE); - } -} - -void free_run_state(RunState* s) { - free(s->x); - free(s->xb); - free(s->xb2); - free(s->hb); - free(s->hb2); - free(s->q); - free(s->k); - free(s->v); - free(s->att); - free(s->logits); - free(s->key_cache); - free(s->value_cache); -} - -void memory_map_weights(TransformerWeights *w, Config* p, float* ptr, int shared_weights) { - int head_size = p->dim / p->n_heads; - w->token_embedding_table = ptr; - ptr += p->vocab_size * p->dim; - w->rms_att_weight = ptr; - ptr += p->n_layers * p->dim; - w->wq = ptr; - ptr += p->n_layers * p->dim * (p->n_heads * head_size); - w->wk = ptr; - ptr += p->n_layers * p->dim * (p->n_kv_heads * head_size); - w->wv = ptr; - ptr += p->n_layers * p->dim * (p->n_kv_heads * head_size); - w->wo = ptr; - ptr += p->n_layers * (p->n_heads * head_size) * p->dim; - w->rms_ffn_weight = ptr; - ptr += p->n_layers * p->dim; - w->w1 = ptr; - ptr += p->n_layers * p->dim * p->hidden_dim; - w->w2 = ptr; - ptr += p->n_layers * p->hidden_dim * p->dim; - w->w3 = ptr; - ptr += p->n_layers * p->dim * p->hidden_dim; - w->rms_final_weight = ptr; - ptr += p->dim; - ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_real (for RoPE) - ptr += p->seq_len * head_size / 2; // skip what used to be freq_cis_imag (for RoPE) - w->wcls = shared_weights ? w->token_embedding_table : ptr; -} - -void read_checkpoint(char* checkpoint, Config* config, TransformerWeights* weights, - int* fd, float** data, ssize_t* file_size) { - FILE *file = fopen(checkpoint, "rb"); - if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); } - // read in the config header - if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); } - // negative vocab size is hacky way of signaling unshared weights. bit yikes. - int shared_weights = config->vocab_size > 0 ? 
1 : 0; - config->vocab_size = abs(config->vocab_size); - // figure out the file size - fseek(file, 0, SEEK_END); // move file pointer to end of file - *file_size = ftell(file); // get the file size, in bytes - fclose(file); - // memory map the Transformer weights into the data pointer - *fd = open(checkpoint, O_RDONLY); // open in read only mode - if (*fd == -1) { fprintf(stderr, "open failed!\n"); exit(EXIT_FAILURE); } - *data = (float*)mmap(NULL, *file_size, PROT_READ, MAP_PRIVATE, *fd, 0); - if (*data == MAP_FAILED) { fprintf(stderr, "mmap failed!\n"); exit(EXIT_FAILURE); } - float* weights_ptr = *data + sizeof(Config)/sizeof(float); - memory_map_weights(weights, config, weights_ptr, shared_weights); -} - -void build_transformer(Transformer *t, char* checkpoint_path) { - // read in the Config and the Weights from the checkpoint - read_checkpoint(checkpoint_path, &t->config, &t->weights, &t->fd, &t->data, &t->file_size); - // allocate the RunState buffers - malloc_run_state(&t->state, &t->config); -} - -void free_transformer(Transformer* t) { - // close the memory mapping - if (t->data != MAP_FAILED) { munmap(t->data, t->file_size); } - if (t->fd != -1) { close(t->fd); } - // free the RunState buffers - free_run_state(&t->state); -} - -// ---------------------------------------------------------------------------- -// neural net blocks; the dynamics of the Transformer - -void rmsnorm(float* o, float* x, float* weight, int size) { - // calculate sum of squares - float ss = 0.0f; - for (int j = 0; j < size; j++) { - ss += x[j] * x[j]; - } - ss /= size; - ss += 1e-5f; - ss = 1.0f / sqrtf(ss); - // normalize and scale - for (int j = 0; j < size; j++) { - o[j] = weight[j] * (ss * x[j]); - } -} - -void softmax(float* x, int size) { - // find max value (for numerical stability) - float max_val = x[0]; - for (int i = 1; i < size; i++) { - if (x[i] > max_val) { - max_val = x[i]; - } - } - // exp and sum - float sum = 0.0f; - for (int i = 0; i < size; i++) { - x[i] = expf(x[i] - max_val); - sum += x[i]; - } - // normalize - for (int i = 0; i < size; i++) { - x[i] /= sum; - } -} - -void matmul(float* xout, float* x, float* w, int n, int d) { - // W (d,n) @ x (n,) -> xout (d,) - // by far the most amount of time is spent inside this little function - int i; - #pragma omp parallel for private(i) - for (i = 0; i < d; i++) { - float val = 0.0f; - for (int j = 0; j < n; j++) { - val += w[i * n + j] * x[j]; - } - xout[i] = val; - } -} - -float* forward(Transformer* transformer, int token, int pos) { - // a few convenience variables - Config* p = &transformer->config; - TransformerWeights* w = &transformer->weights; - RunState* s = &transformer->state; - float *x = s->x; - int dim = p->dim; - int kv_dim = (p->dim * p->n_kv_heads) / p->n_heads; - int kv_mul = p->n_heads / p->n_kv_heads; // integer multiplier of the kv sharing in multiquery - int hidden_dim = p->hidden_dim; - int head_size = dim / p->n_heads; - - // copy the token embedding into x - float* content_row = w->token_embedding_table + token * dim; - memcpy(x, content_row, dim*sizeof(*x)); - - // forward all the layers - for(int l = 0; l < p->n_layers; l++) { - - // attention rmsnorm - rmsnorm(s->xb, x, w->rms_att_weight + l*dim, dim); - - // qkv matmuls for this position - matmul(s->q, s->xb, w->wq + l*dim*dim, dim, dim); - matmul(s->k, s->xb, w->wk + l*dim*kv_dim, dim, kv_dim); - matmul(s->v, s->xb, w->wv + l*dim*kv_dim, dim, kv_dim); - - // RoPE relative positional encoding: complex-valued rotate q and k in each head - for (int i = 0; i < dim; 
i+=2) { - int head_dim = i % head_size; - float freq = 1.0f / powf(10000.0f, head_dim / (float)head_size); - float val = pos * freq; - float fcr = cosf(val); - float fci = sinf(val); - int rotn = i < kv_dim ? 2 : 1; // how many vectors? 2 = q & k, 1 = q only - for (int v = 0; v < rotn; v++) { - float* vec = v == 0 ? s->q : s->k; // the vector to rotate (query or key) - float v0 = vec[i]; - float v1 = vec[i+1]; - vec[i] = v0 * fcr - v1 * fci; - vec[i+1] = v0 * fci + v1 * fcr; - } - } - - // save key,value at this time step (pos) to our kv cache - int loff = l * p->seq_len * kv_dim; // kv cache layer offset for convenience - float* key_cache_row = s->key_cache + loff + pos * kv_dim; - float* value_cache_row = s->value_cache + loff + pos * kv_dim; - memcpy(key_cache_row, s->k, kv_dim * sizeof(*key_cache_row)); - memcpy(value_cache_row, s->v, kv_dim * sizeof(*value_cache_row)); - - // multihead attention. iterate over all heads - int h; - #pragma omp parallel for private(h) - for (h = 0; h < p->n_heads; h++) { - // get the query vector for this head - float* q = s->q + h * head_size; - // attention scores for this head - float* att = s->att + h * p->seq_len; - // iterate over all timesteps, including the current one - for (int t = 0; t <= pos; t++) { - // get the key vector for this head and at this timestep - float* k = s->key_cache + loff + t * kv_dim + (h / kv_mul) * head_size; - // calculate the attention score as the dot product of q and k - float score = 0.0f; - for (int i = 0; i < head_size; i++) { - score += q[i] * k[i]; - } - score /= sqrtf(head_size); - // save the score to the attention buffer - att[t] = score; - } - - // softmax the scores to get attention weights, from 0..pos inclusively - softmax(att, pos + 1); - - // weighted sum of the values, store back into xb - float* xb = s->xb + h * head_size; - memset(xb, 0, head_size * sizeof(float)); - for (int t = 0; t <= pos; t++) { - // get the value vector for this head and at this timestep - float* v = s->value_cache + loff + t * kv_dim + (h / kv_mul) * head_size; - // get the attention weight for this timestep - float a = att[t]; - // accumulate the weighted value into xb - for (int i = 0; i < head_size; i++) { - xb[i] += a * v[i]; - } - } - } - - // final matmul to get the output of the attention - matmul(s->xb2, s->xb, w->wo + l*dim*dim, dim, dim); - - // residual connection back into x - for (int i = 0; i < dim; i++) { - x[i] += s->xb2[i]; - } - - // ffn rmsnorm - rmsnorm(s->xb, x, w->rms_ffn_weight + l*dim, dim); - - // Now for FFN in PyTorch we have: self.w2(F.silu(self.w1(x)) * self.w3(x)) - // first calculate self.w1(x) and self.w3(x) - matmul(s->hb, s->xb, w->w1 + l*dim*hidden_dim, dim, hidden_dim); - matmul(s->hb2, s->xb, w->w3 + l*dim*hidden_dim, dim, hidden_dim); - - // SwiGLU non-linearity - for (int i = 0; i < hidden_dim; i++) { - float val = s->hb[i]; - // silu(x)=x*σ(x), where σ(x) is the logistic sigmoid - val *= (1.0f / (1.0f + expf(-val))); - // elementwise multiply with w3(x) - val *= s->hb2[i]; - s->hb[i] = val; - } - - // final matmul to get the output of the ffn - matmul(s->xb, s->hb, w->w2 + l*dim*hidden_dim, hidden_dim, dim); - - // residual connection - for (int i = 0; i < dim; i++) { - x[i] += s->xb[i]; - } - } - - // final rmsnorm - rmsnorm(x, x, w->rms_final_weight, dim); - - // classifier into logits - matmul(s->logits, x, w->wcls, p->dim, p->vocab_size); - return s->logits; -} - -// ---------------------------------------------------------------------------- -// The Byte Pair Encoding (BPE) Tokenizer 
that translates strings <-> tokens - -typedef struct { - char *str; - int id; -} TokenIndex; - -typedef struct { - char** vocab; - float* vocab_scores; - TokenIndex *sorted_vocab; - int vocab_size; - unsigned int max_token_length; - unsigned char byte_pieces[512]; // stores all single-byte strings -} Tokenizer; - -int compare_tokens(const void *a, const void *b) { - return strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str); -} - -void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size) { - // i should have written the vocab_size into the tokenizer file... sigh - t->vocab_size = vocab_size; - // malloc space to hold the scores and the strings - t->vocab = (char**)malloc(vocab_size * sizeof(char*)); - t->vocab_scores = (float*)malloc(vocab_size * sizeof(float)); - t->sorted_vocab = NULL; // initialized lazily - for (int i = 0; i < 256; i++) { - t->byte_pieces[i * 2] = (unsigned char)i; - t->byte_pieces[i * 2 + 1] = '\0'; - } - // read in the file - FILE *file = fopen(tokenizer_path, "rb"); - if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); } - if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); } - int len; - for (int i = 0; i < vocab_size; i++) { - if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);} - if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); } - t->vocab[i] = (char *)malloc(len + 1); - if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); } - t->vocab[i][len] = '\0'; // add the string terminating token - } - fclose(file); -} - -void free_tokenizer(Tokenizer* t) { - for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); } - free(t->vocab); - free(t->vocab_scores); - free(t->sorted_vocab); -} - -char* decode(Tokenizer* t, int prev_token, int token) { - char *piece = t->vocab[token]; - // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89) - if (prev_token == 1 && piece[0] == ' ') { piece++; } - // careful, some tokens designate raw bytes, and look like e.g. '<0x01>' - // parse this and convert and return the actual byte - unsigned char byte_val; - if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) { - piece = (char*)t->byte_pieces + byte_val * 2; - } - return piece; -} - -void safe_printf(char *piece) { - // piece might be a raw byte token, and we only want to print printable chars or whitespace - // because some of the other bytes can be various control codes, backspace, etc. - if (piece == NULL) { return; } - if (piece[0] == '\0') { return; } - if (piece[1] == '\0') { - unsigned char byte_val = piece[0]; - if (!(isprint(byte_val) || isspace(byte_val))) { - return; // bad byte, don't print it - } - } - printf("%s", piece); -} - -int str_lookup(char *str, TokenIndex *sorted_vocab, int vocab_size) { - // efficiently find the perfect match for str in vocab, return its index or -1 if not found - TokenIndex tok = { .str = str }; // acts as the key to search for - TokenIndex *res = (TokenIndex*)bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens); - return res != NULL ? 
res->id : -1; -} - -void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) { - // encode the string text (input) into an upper-bound preallocated tokens[] array - // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2) - if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); } - - if (t->sorted_vocab == NULL) { - // lazily malloc and sort the vocabulary - t->sorted_vocab = (TokenIndex*)malloc(t->vocab_size * sizeof(TokenIndex)); - for (int i = 0; i < t->vocab_size; i++) { - t->sorted_vocab[i].str = t->vocab[i]; - t->sorted_vocab[i].id = i; - } - qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens); - } - - // create a temporary buffer that will store merge candidates of always two consecutive tokens - // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1) - char* str_buffer = (char*)malloc((t->max_token_length*2 +1 +2) * sizeof(char)); - size_t str_len = 0; - - // start at 0 tokens - *n_tokens = 0; - - // add optional BOS (=1) token, if desired - if (bos) tokens[(*n_tokens)++] = 1; - - // add_dummy_prefix is true by default - // so prepend a dummy prefix token to the input string, but only if text != "" - // TODO: pretty sure this isn't correct in the general case but I don't have the - // energy to read more of the sentencepiece code to figure out what it's doing - if (text[0] != '\0') { - int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size); - tokens[(*n_tokens)++] = dummy_prefix; - } - - // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia: - // Code point ↔ UTF-8 conversion - // First code point Last code point Byte 1 Byte 2 Byte 3 Byte 4 - // U+0000 U+007F 0xxxxxxx - // U+0080 U+07FF 110xxxxx 10xxxxxx - // U+0800 U+FFFF 1110xxxx 10xxxxxx 10xxxxxx - // U+10000 U+10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - - // process the raw (UTF-8) byte sequence of the input string - for (char *c = text; *c != '\0'; c++) { - - // reset buffer if the current byte is ASCII or a leading byte - // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest - // 0x80 is 10000000 - // in UTF-8, all continuation bytes start with "10" in first two bits - // so in English this is: "if this byte is not a continuation byte" - if ((*c & 0xC0) != 0x80) { - // this byte must be either a leading byte (11...) or an ASCII char (0x...) - // => reset our location, as we're starting a new UTF-8 codepoint - str_len = 0; - } - - // append the current byte to the buffer - str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line - str_buffer[str_len] = '\0'; - - // while the next character is a continuation byte, continue appending - // but if there are too many of them, just stop to avoid overruning str_buffer size. 
- if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) { - continue; - } - - // ok c+1 is not a continuation byte, so we've read in a full codepoint - int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size); - - if (id != -1) { - // we found this codepoint in vocab, add it as a token - tokens[(*n_tokens)++] = id; - } else { - // byte_fallback encoding: just encode each byte as a token - // +3 is here because the first 3 vocab elements are , , - // so the individual bytes only start at index 3 - for (int i=0; i < str_len; i++) { - tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3; - } - } - str_len = 0; // protect against a sequence of stray UTF8 continuation bytes - } - - // merge the best consecutive pair each iteration, according the scores in vocab_scores - while (1) { - float best_score = -1e10; - int best_id = -1; - int best_idx = -1; - - for (int i=0; i < (*n_tokens-1); i++) { - // check if we can merge the pair (tokens[i], tokens[i+1]) - sprintf(str_buffer, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]); - int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size); - if (id != -1 && t->vocab_scores[id] > best_score) { - // this merge pair exists in vocab! record its score and position - best_score = t->vocab_scores[id]; - best_id = id; - best_idx = i; - } - } - - if (best_idx == -1) { - break; // we couldn't find any more pairs to merge, so we're done - } - - // merge the consecutive pair (best_idx, best_idx+1) into new token best_id - tokens[best_idx] = best_id; - // delete token at position best_idx+1, shift the entire sequence back 1 - for (int i = best_idx+1; i < (*n_tokens-1); i++) { - tokens[i] = tokens[i+1]; - } - (*n_tokens)--; // token length decreased - } - - // add optional EOS (=2) token, if desired - if (eos) tokens[(*n_tokens)++] = 2; - - free(str_buffer); -} - -// ---------------------------------------------------------------------------- -// The Sampler, which takes logits and returns a sampled token -// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling - -typedef struct { - float prob; - int index; -} ProbIndex; // struct used when sorting probabilities during top-p sampling - -typedef struct { - int vocab_size; - ProbIndex* probindex; // buffer used in top-p sampling - float temperature; - float topp; - unsigned long long rng_state; -} Sampler; - -int sample_argmax(float* probabilities, int n) { - // return the index that has the highest probability - int max_i = 0; - float max_p = probabilities[0]; - for (int i = 1; i < n; i++) { - if (probabilities[i] > max_p) { - max_i = i; - max_p = probabilities[i]; - } - } - return max_i; -} - -int sample_mult(float* probabilities, int n, float coin) { - // sample index from probabilities (they must sum to 1!) - // coin is a random number in [0, 1), usually from random_f32() - float cdf = 0.0f; - for (int i = 0; i < n; i++) { - cdf += probabilities[i]; - if (coin < cdf) { - return i; - } - } - return n - 1; // in case of rounding errors -} - -int compare(const void* a, const void* b) { - ProbIndex* a_ = (ProbIndex*) a; - ProbIndex* b_ = (ProbIndex*) b; - if (a_->prob > b_->prob) return -1; - if (a_->prob < b_->prob) return 1; - return 0; -} - -int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) { - // top-p sampling (or "nucleus sampling") samples from the smallest set of - // tokens that exceed probability topp. This way we never sample tokens that - // have very low probabilities and are less likely to go "off the rails". 
- // coin is a random number in [0, 1), usually from random_f32() - - int n0 = 0; - // quicksort indices in descending order of probabilities - // values smaller than (1 - topp) / (n - 1) cannot be part of the result - // so for efficiency we crop these out as candidates before sorting - const float cutoff = (1.0f - topp) / (n - 1); - for (int i = 0; i < n; i++) { - if (probabilities[i] >= cutoff) { - probindex[n0].index = i; - probindex[n0].prob = probabilities[i]; - n0++; - } - } - qsort(probindex, n0, sizeof(ProbIndex), compare); - - // truncate the list where cumulative probability exceeds topp - float cumulative_prob = 0.0f; - int last_idx = n0 - 1; // in case of rounding errors consider all elements - for (int i = 0; i < n0; i++) { - cumulative_prob += probindex[i].prob; - if (cumulative_prob > topp) { - last_idx = i; - break; // we've exceeded topp by including last_idx - } - } - - // sample from the truncated list - float r = coin * cumulative_prob; - float cdf = 0.0f; - for (int i = 0; i <= last_idx; i++) { - cdf += probindex[i].prob; - if (r < cdf) { - return probindex[i].index; - } - } - return probindex[last_idx].index; // in case of rounding errors -} - -void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) { - sampler->vocab_size = vocab_size; - sampler->temperature = temperature; - sampler->topp = topp; - sampler->rng_state = rng_seed; - // buffer only used with nucleus sampling; may not need but it's ~small - sampler->probindex = (ProbIndex*)malloc(sampler->vocab_size * sizeof(ProbIndex)); -} - -void free_sampler(Sampler* sampler) { - free(sampler->probindex); -} - -unsigned int random_u32(unsigned long long *state) { - // xorshift rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A - *state ^= *state >> 12; - *state ^= *state << 25; - *state ^= *state >> 27; - return (*state * 0x2545F4914F6CDD1Dull) >> 32; -} -float random_f32(unsigned long long *state) { // random float32 in [0,1) - return (random_u32(state) >> 8) / 16777216.0f; -} - -int sample(Sampler* sampler, float* logits) { - // sample the token given the logits and some hyperparameters - int next; - if (sampler->temperature == 0.0f) { - // greedy argmax sampling: take the token with the highest probability - next = sample_argmax(logits, sampler->vocab_size); - } else { - // apply the temperature to the logits - for (int q=0; qvocab_size; q++) { logits[q] /= sampler->temperature; } - // apply softmax to the logits to get the probabilities for next token - softmax(logits, sampler->vocab_size); - // flip a (float) coin (this is our source of entropy for sampling) - float coin = random_f32(&sampler->rng_state); - // we sample from this distribution to get the next token - if (sampler->topp <= 0 || sampler->topp >= 1) { - // simply sample from the predicted probability distribution - next = sample_mult(logits, sampler->vocab_size, coin); - } else { - // top-p (nucleus) sampling, clamping the least likely tokens to zero - next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin); - } - } - return next; -} - -// ---------------------------------------------------------------------------- -// utilities: time - -long time_in_ms() { - // return time in milliseconds, for benchmarking the model speed - struct timespec time; - clock_gettime(CLOCK_REALTIME, &time); - return time.tv_sec * 1000 + time.tv_nsec / 1000000; -} - -// ---------------------------------------------------------------------------- -// generation loop - -void 
generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, char *prompt, int steps) { - - // encode the (string) prompt into tokens sequence - int num_prompt_tokens = 0; - int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS - encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens); - if (num_prompt_tokens < 1) { - fprintf(stderr, "something is wrong, expected at least 1 prompt token\n"); - exit(EXIT_FAILURE); - } - - // start the main loop - long start = 0; // used to time our code, only initialized after first iteration - int next; // will store the next token in the sequence - int token = prompt_tokens[0]; // kick off with the first token in the prompt - int pos = 0; // position in the sequence - while (pos < steps) { - - // forward the transformer to get logits for the next token - float* logits = forward(transformer, token, pos); - - // advance the state state machine - if (pos < num_prompt_tokens - 1) { - // if we are still processing the input prompt, force the next prompt token - next = prompt_tokens[pos + 1]; - } else { - // otherwise sample the next token from the logits - next = sample(sampler, logits); - } - pos++; - - // data-dependent terminating condition: the BOS (=1) token delimits sequences - if (next == 1) { break; } - - // print the token as string, decode it with the Tokenizer object - char* piece = decode(tokenizer, token, next); - safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes - fflush(stdout); - token = next; - - // init the timer here because the first iteration can be slower - if (start == 0) { start = time_in_ms(); } - } - printf("\n"); - - // report achieved tok/s (pos-1 because the timer starts after first iteration) - if (pos > 1) { - long end = time_in_ms(); - fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000); - } - - free(prompt_tokens); -} - - -// ---------------------------------------------------------------------------- -// CLI, include only if not testing -#ifndef TESTING - -void error_usage() { - fprintf(stderr, "Usage: run [options]\n"); - fprintf(stderr, "Example: run model.bin -n 256 -i \"Once upon a time\"\n"); - fprintf(stderr, "Options:\n"); - fprintf(stderr, " -t temperature in [0,inf], default 1.0\n"); - fprintf(stderr, " -p p value in top-p (nucleus) sampling in [0,1] default 0.9\n"); - fprintf(stderr, " -s random seed, default time(NULL)\n"); - fprintf(stderr, " -n number of steps to run for, default 256. 0 = max_seq_len\n"); - fprintf(stderr, " -i input prompt\n"); - fprintf(stderr, " -z optional path to custom tokenizer\n"); - exit(EXIT_FAILURE); -} - - -int main(int argc, char *argv[]) { - - // default parameters - char *checkpoint_path = NULL; // e.g. out/model.bin - char *tokenizer_path = "tokenizer.bin"; - float temperature = 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher - float topp = 0.9f; // top-p in nucleus sampling. 1.0 = off. 
0.9 works well, but slower - int steps = 256; // number of steps to run for - char *prompt = ""; // prompt string - unsigned long long rng_seed = 0; // seed rng with time by default - - // poor man's C argparse so we can override the defaults above from the command line - if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); } - for (int i = 2; i < argc; i+=2) { - // do some basic validation - if (i + 1 >= argc) { error_usage(); } // must have arg after flag - if (argv[i][0] != '-') { error_usage(); } // must start with dash - if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter) - // read in the args - if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); } - else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); } - else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); } - else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); } - else if (argv[i][1] == 'i') { prompt = argv[i + 1]; } - else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; } - else { error_usage(); } - } - - // parameter validation/overrides - if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL); - if (temperature < 0.0) temperature = 0.0; - if (topp < 0.0 || 1.0 < topp) topp = 0.9; - if (steps < 0) steps = 0; - - // build the Transformer via the model .bin file - Transformer transformer; - build_transformer(&transformer, checkpoint_path); - if (steps == 0 || steps > transformer.config.seq_len) steps = transformer.config.seq_len; // ovrerride to ~max length - - // build the Tokenizer via the tokenizer .bin file - Tokenizer tokenizer; - build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size); - - // build the Sampler - Sampler sampler; - build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed); - - // run! 
- generate(&transformer, &tokenizer, &sampler, prompt, steps); - - // memory and file handles cleanup - free_sampler(&sampler); - free_tokenizer(&tokenizer); - free_transformer(&transformer); - return 0; -} - -#endif From ac96344db4371fd319980cd95f7169a382f0474d Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Tue, 30 Jan 2024 05:54:45 +0000 Subject: [PATCH 02/11] Add AOTInductor example --- .gitmodules | 3 + cpp/build.sh | 47 ++- cpp/src/examples/CMakeLists.txt | 2 + cpp/test/examples/examples_test.cc | 20 ++ .../llama_handler/MAR-INF/MANIFEST.json | 10 + .../aot_inductor/llama_handler/config.json | 4 + .../examples/aot_inductor/prompt.txt | 1 + cpp/third-party/llama2.so | 1 + examples/cpp/aot_inductor/CMakeLists.txt | 5 + examples/cpp/aot_inductor/README.md | 87 +++++ examples/cpp/aot_inductor/compile.py | 43 +++ examples/cpp/aot_inductor/config.json | 4 + examples/cpp/aot_inductor/prompt1.txt | 1 + examples/cpp/aot_inductor/prompt2.txt | 1 + .../cpp/aot_inductor/src/llama2.so/LICENSE | 21 ++ .../cpp/aot_inductor/src/llama2.so/llama2.hh | 72 +++++ .../cpp/aot_inductor/src/llama_handler.cc | 302 ++++++++++++++++++ .../cpp/aot_inductor/src/llama_handler.hh | 41 +++ 18 files changed, 652 insertions(+), 13 deletions(-) create mode 100644 cpp/test/resources/examples/aot_inductor/llama_handler/MAR-INF/MANIFEST.json create mode 100644 cpp/test/resources/examples/aot_inductor/llama_handler/config.json create mode 100644 cpp/test/resources/examples/aot_inductor/prompt.txt create mode 160000 cpp/third-party/llama2.so create mode 100644 examples/cpp/aot_inductor/CMakeLists.txt create mode 100644 examples/cpp/aot_inductor/README.md create mode 100644 examples/cpp/aot_inductor/compile.py create mode 100644 examples/cpp/aot_inductor/config.json create mode 100644 examples/cpp/aot_inductor/prompt1.txt create mode 100644 examples/cpp/aot_inductor/prompt2.txt create mode 100644 examples/cpp/aot_inductor/src/llama2.so/LICENSE create mode 100644 examples/cpp/aot_inductor/src/llama2.so/llama2.hh create mode 100644 examples/cpp/aot_inductor/src/llama_handler.cc create mode 100644 examples/cpp/aot_inductor/src/llama_handler.hh diff --git a/.gitmodules b/.gitmodules index a2584b165a..4a00ef021e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -7,3 +7,6 @@ [submodule "cpp/third-party/llama2.c"] path = cpp/third-party/llama2.c url = https://github.com/karpathy/llama2.c +[submodule "cpp/third-party/llama2.so"] + path = cpp/third-party/llama2.so + url = https://github.com/bertmaher/llama2.so.git diff --git a/cpp/build.sh b/cpp/build.sh index 165cf17cbb..0ad5e9c5d5 100755 --- a/cpp/build.sh +++ b/cpp/build.sh @@ -80,19 +80,17 @@ function install_libtorch() { cd "$DEPS_DIR" || exit if [ "$PLATFORM" = "Linux" ]; then echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Linux ${COLOR_OFF}" - if [ "$CUDA" = "cu118" ]; then - wget https://download.pytorch.org/libtorch/cu118/libtorch-cxx11-abi-shared-with-deps-2.1.1%2Bcu118.zip - unzip libtorch-cxx11-abi-shared-with-deps-2.1.1+cu118.zip - rm libtorch-cxx11-abi-shared-with-deps-2.1.1+cu118.zip - elif [ "$CUDA" = "cu121" ]; then - wget https://download.pytorch.org/libtorch/cu121/libtorch-cxx11-abi-shared-with-deps-2.1.1%2Bcu121.zip - unzip libtorch-cxx11-abi-shared-with-deps-2.1.1+cu121.zip - rm libtorch-cxx11-abi-shared-with-deps-2.1.1+cu121.zip + if [ "$USE_NIGHTLIES" == true ] ; then + URL=https://download.pytorch.org/libtorch/nightly/${CUDA}/libtorch-cxx11-abi-shared-with-deps-latest.zip else - wget 
https://download.pytorch.org/libtorch/cpu/libtorch-cxx11-abi-shared-with-deps-2.1.1%2Bcpu.zip - unzip libtorch-cxx11-abi-shared-with-deps-2.1.1+cpu.zip - rm libtorch-cxx11-abi-shared-with-deps-2.1.1+cpu.zip + URL=https://download.pytorch.org/libtorch/${CUDA}/libtorch-cxx11-abi-shared-with-deps-2.1.1%2B${CUDA}.zip fi + wget $URL + ZIP_FILE=$(basename "$URL") + ZIP_FILE="${ZIP_FILE//%2B/+}" + unzip $ZIP_FILE + rm $ZIP_FILE + elif [ "$PLATFORM" = "Windows" ]; then echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Windows ${COLOR_OFF}" # TODO: Windows @@ -144,6 +142,25 @@ function build_llama_cpp() { cd "$BWD" || exit } +function prepare_test_files() { + local R_DIR="${BASE_DIR}/test/resources/examples/" + if [ ! -f "${R_DIR}/babyllama/babyllama_handler/tokenizer.bin" ]; then + wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin -O "${R_DIR}/babyllama/babyllama_handler/tokenizer.bin" + fi + if [ ! -f "${R_DIR}/babyllama/babyllama_handler/stories15M.bin" ]; then + wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin -O "${R_DIR}/babyllama/babyllama_handler/stories15M.bin" + fi + if [ ! -f "${R_DIR}/aot_inductor/llama_handler/stories15M.so" ] && [ "$USE_NIGHTLIES" == true ]; then + local L_DIR=${R_DIR}/aot_inductor/llama_handler/ + if [ ! -f "${L_DIR}/stories15M.pt" ]; then + wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt?download=true -O "${L_DIR}/stories15M.pt" + fi + local LLAMA_SO_DIR=${BASE_DIR}/third-party/llama2_so/ + # touch ${LLAMA_SO_DIR}/llama2_so/__init__.py + PYTHONPATH=${LLAMA_SO_DIR}:${PYTHONPATH} python ${BASE_DIR}/../examples/cpp/aot_inductor/compile.py --checkpoint ${L_DIR}/stories15M.pt ${L_DIR}/stories15M.so + fi +} + function build() { MAYBE_BUILD_QUIC="" if [ "$WITH_QUIC" == true ] ; then @@ -255,8 +272,8 @@ WITH_QUIC=false INSTALL_DEPENDENCIES=false PREFIX="" COMPILER_FLAGS="" -CUDA="" -USAGE="./build.sh [-j num_jobs] [-g cu118|cu121] [-q|--with-quic] [-p|--prefix] [-x|--compiler-flags]" +CUDA="cpu" +USAGE="./build.sh [-j num_jobs] [-g cu118|cu121] [-q|--with-quic] [-t|--no-tets] [-p|--prefix] [-x|--compiler-flags] [-n|--nighlies]" while [ "$1" != "" ]; do case $1 in -j | --jobs ) shift @@ -279,6 +296,9 @@ while [ "$1" != "" ]; do shift COMPILER_FLAGS=$1 ;; + -n | --nightlies ) + USE_NIGHTLIES=true + ;; * ) echo $USAGE exit 1 esac @@ -316,6 +336,7 @@ install_kineto install_libtorch install_yaml_cpp build_llama_cpp +prepare_test_files build symlink_torch_libs symlink_yaml_cpp_lib diff --git a/cpp/src/examples/CMakeLists.txt b/cpp/src/examples/CMakeLists.txt index a313616270..c7308cdcd6 100644 --- a/cpp/src/examples/CMakeLists.txt +++ b/cpp/src/examples/CMakeLists.txt @@ -1,6 +1,8 @@ add_subdirectory("../../../examples/cpp/babyllama/" "../../../test/resources/examples/babyllama/babyllama_handler/") +add_subdirectory("../../../examples/cpp/aot_inductor/" "../../../test/resources/examples/aot_inductor/llama_handler/") + add_subdirectory("../../../examples/cpp/llamacpp/" "../../../test/resources/examples/llamacpp/llamacpp_handler/") add_subdirectory("../../../examples/cpp/mnist/" "../../../test/resources/examples/mnist/mnist_handler/") diff --git a/cpp/test/examples/examples_test.cc b/cpp/test/examples/examples_test.cc index 22254288cc..bb3c4d998c 100644 --- a/cpp/test/examples/examples_test.cc +++ b/cpp/test/examples/examples_test.cc @@ -21,6 +21,26 @@ TEST_F(ModelPredictTest, TestLoadPredictBabyLlamaHandler) { base_dir + "babyllama_handler", base_dir + "prompt.txt", "llm_ts", 200); } +TEST_F(ModelPredictTest, 
TestLoadPredictAotInductorLlamaHandler) {
+  std::string base_dir = "test/resources/examples/aot_inductor/";
+  std::string file1 = base_dir + "llama_handler/stories15M.so";
+  std::string file2 =
+      "test/resources/examples/babyllama/babyllama_handler/tokenizer.bin";
+
+  std::ifstream f1(file1);
+  std::ifstream f2(file2);
+
+  if (!f1.good() && !f2.good())
+    GTEST_SKIP() << "Skipping TestLoadPredictAotInductorLlamaHandler because "
+                    "of missing files: "
+                 << file1 << " or " << file2;
+
+  this->LoadPredict(
+      std::make_shared(
+          base_dir + "llama_handler", "llama", -1, "", "", 1, false),
+      base_dir + "llama_handler", base_dir + "prompt.txt", "llm_ts", 200);
+}
+
 TEST_F(ModelPredictTest, TestLoadPredictLlmHandler) {
   std::string base_dir = "test/resources/examples/llamacpp/";
   std::string file1 = base_dir + "llamacpp_handler/llama-2-7b-chat.Q5_0.gguf";
diff --git a/cpp/test/resources/examples/aot_inductor/llama_handler/MAR-INF/MANIFEST.json b/cpp/test/resources/examples/aot_inductor/llama_handler/MAR-INF/MANIFEST.json
new file mode 100644
index 0000000000..6f0f2d5295
--- /dev/null
+++ b/cpp/test/resources/examples/aot_inductor/llama_handler/MAR-INF/MANIFEST.json
@@ -0,0 +1,10 @@
+{
+  "createdOn": "28/07/2020 06:32:08",
+  "runtime": "LSP",
+  "model": {
+    "modelName": "llama",
+    "handler": "libllama_so_handler:LlamaHandler",
+    "modelVersion": "2.0"
+  },
+  "archiverVersion": "0.2.0"
+}
diff --git a/cpp/test/resources/examples/aot_inductor/llama_handler/config.json b/cpp/test/resources/examples/aot_inductor/llama_handler/config.json
new file mode 100644
index 0000000000..a55d1bf7dc
--- /dev/null
+++ b/cpp/test/resources/examples/aot_inductor/llama_handler/config.json
@@ -0,0 +1,4 @@
+{
+"checkpoint_path" : "test/resources/examples/aot_inductor/llama_handler/stories15M.so",
+"tokenizer_path" : "test/resources/examples/babyllama/babyllama_handler/tokenizer.bin"
+}
diff --git a/cpp/test/resources/examples/aot_inductor/prompt.txt b/cpp/test/resources/examples/aot_inductor/prompt.txt
new file mode 100644
index 0000000000..74b56be151
--- /dev/null
+++ b/cpp/test/resources/examples/aot_inductor/prompt.txt
@@ -0,0 +1 @@
+Hello my name is
diff --git a/cpp/third-party/llama2.so b/cpp/third-party/llama2.so
new file mode 160000
index 0000000000..d07f694bb4
--- /dev/null
+++ b/cpp/third-party/llama2.so
@@ -0,0 +1 @@
+Subproject commit d07f694bb43cf990cdc08edbf1643113d76dc80e
diff --git a/examples/cpp/aot_inductor/CMakeLists.txt b/examples/cpp/aot_inductor/CMakeLists.txt
new file mode 100644
index 0000000000..5d50a0710a
--- /dev/null
+++ b/examples/cpp/aot_inductor/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_library(llama2_so STATIC ../../../cpp/third-party/llama2.so/run.cpp)
+target_compile_options(llama2_so PRIVATE -Wall -Wextra -Ofast -fpermissive)
+
+add_library(llama_so_handler SHARED src/llama_handler.cc)
+target_link_libraries(llama_so_handler PRIVATE llama2_so ts_backends_core ts_utils ${TORCH_LIBRARIES})
diff --git a/examples/cpp/aot_inductor/README.md b/examples/cpp/aot_inductor/README.md
new file mode 100644
index 0000000000..cba4df5cd5
--- /dev/null
+++ b/examples/cpp/aot_inductor/README.md
@@ -0,0 +1,87 @@
+This example is adapted from https://github.com/karpathy/llama2.c. The handler C++ source code for this example can be found [here](../../../cpp/src/examples/babyllama/).
+
+### Setup
+1. Follow the instructions in [README.md](../../../cpp/README.md) to build the TorchServe C++ backend.
+
+```
+cd serve/cpp
+./build.sh
+```
+
+2. Download the model and tokenizer using the following commands:
+
+```bash
+cd ~/serve/examples/cpp/babyllama
+wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin
+wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin
+```
+
+3. Create a [config.json](config.json) with the paths of the downloaded model and tokenizer:
+
+```bash
+echo '{
+"checkpoint_path" : "/home/ubuntu/serve/examples/cpp/babyllama/stories15M.bin",
+"tokenizer_path" : "/home/ubuntu/serve/examples/cpp/babyllama/tokenizer.bin"
+}' > config.json
+```
+
+4. Copy the handler .so file
+
+While building the C++ backend, the `libbabyllama_handler.so` file is generated in the [babyllama_handler](../../../cpp/test/resources/examples/babyllama/babyllama_handler) folder.
+
+```bash
+cp ../../../cpp/test/resources/examples/babyllama/babyllama_handler/libbabyllama_handler.so ./
+```
+
+### Generate MAR file
+
+Now let's generate the MAR file:
+
+```bash
+torch-model-archiver --model-name llm --version 1.0 --handler libbabyllama_handler:BabyLlamaHandler --runtime LSP --extra-files config.json
+```
+
+Create a model store directory and move the MAR file:
+
+```
+mkdir model_store
+mv llm.mar model_store/
+```
+
+### Inference
+
+Start TorchServe using the following command:
+
+```
+torchserve --ncs --model-store model_store/
+```
+
+Register the model using the following command:
+
+```
+curl -v -X POST "http://localhost:8081/models?initial_workers=1&url=llm.mar&batch_size=2&max_batch_delay=5000"
+```
+
+Run inference using the following command:
+
+```
+curl http://localhost:8080/predictions/llm -T prompt1.txt
+```
+
+This example supports batching. To run batch prediction, run the following command:
+
+```
+curl http://localhost:8080/predictions/llm -T prompt1.txt & curl http://localhost:8080/predictions/llm -T prompt2.txt &
+```
+
+Sample response:
+
+```
+Hello my name is Daisy. Daisy is three years old. She loves to play with her toys.
+One day, Daisy's mommy said, "Daisy, it's time to go to the store." Daisy was so excited! She ran to the store with her mommy.
+At the store, Daisy saw a big, red balloon. She wanted it so badly! She asked her mommy, "Can I have the balloon, please?"
+Mommy said, "No, Daisy. We don't have enough money for that balloon."
+Daisy was sad. She wanted the balloon so much. She started to cry.
+Mommy said, "Daisy, don't cry. We can get the balloon. We can buy it and take it home."
+Daisy smiled. She was so happy. She hugged her mommy and said, "Thank you, mommy!"
+```
diff --git a/examples/cpp/aot_inductor/compile.py b/examples/cpp/aot_inductor/compile.py
new file mode 100644
index 0000000000..aff0804b3f
--- /dev/null
+++ b/examples/cpp/aot_inductor/compile.py
@@ -0,0 +1,43 @@
+import argparse
+
+import torch
+import torch._export
+from model import ModelArgs, Transformer
+
+
+def load_checkpoint(checkpoint):
+    # load the provided model checkpoint
+    checkpoint_dict = torch.load(checkpoint, map_location="cpu")
+    gptconf = ModelArgs(**checkpoint_dict["model_args"])
+    model = Transformer(gptconf)
+    state_dict = checkpoint_dict["model"]
+    unwanted_prefix = "_orig_mod."
+ for k, v in list(state_dict.items()): + if k.startswith(unwanted_prefix): + state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k) + model.load_state_dict(state_dict, strict=False) + model.eval() + return model, gptconf + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "filepath", type=str, default="llama2.so", help="the output filepath" + ) + parser.add_argument("--checkpoint", type=str, help="checkpoint .pt") + args = parser.parse_args() + model, config = load_checkpoint(args.checkpoint) + x = torch.randint(0, config.vocab_size, (1, config.max_seq_len // 2)) + constraints = [ + torch._export.dynamic_dim(x, 1), + torch._export.dynamic_dim(x, 1) <= config.max_seq_len, + torch._export.dynamic_dim(x, 1) >= 1, + ] + torch._C._GLIBCXX_USE_CXX11_ABI = True + so_path = torch._export.aot_compile( + model, + (x,), + constraints=constraints, + options={"aot_inductor.output_path": args.filepath}, + ) diff --git a/examples/cpp/aot_inductor/config.json b/examples/cpp/aot_inductor/config.json new file mode 100644 index 0000000000..d2a11f95b9 --- /dev/null +++ b/examples/cpp/aot_inductor/config.json @@ -0,0 +1,4 @@ +{ +"checkpoint_path" : "/home/ubuntu/serve/examples/cpp/babyllama/stories15M.bin", +"tokenizer_path" : "/home/ubuntu/serve/cpp/test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" +} diff --git a/examples/cpp/aot_inductor/prompt1.txt b/examples/cpp/aot_inductor/prompt1.txt new file mode 100644 index 0000000000..baa5a1abbf --- /dev/null +++ b/examples/cpp/aot_inductor/prompt1.txt @@ -0,0 +1 @@ +Hello my name is Dan diff --git a/examples/cpp/aot_inductor/prompt2.txt b/examples/cpp/aot_inductor/prompt2.txt new file mode 100644 index 0000000000..99568648e9 --- /dev/null +++ b/examples/cpp/aot_inductor/prompt2.txt @@ -0,0 +1 @@ +Hello my name is Daisy diff --git a/examples/cpp/aot_inductor/src/llama2.so/LICENSE b/examples/cpp/aot_inductor/src/llama2.so/LICENSE new file mode 100644 index 0000000000..2ad12227f9 --- /dev/null +++ b/examples/cpp/aot_inductor/src/llama2.so/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Andrej + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
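The llama2.so wrapper introduced in the files below keeps llama2.c's C-style API (`build_transformer`, `forward`, the tokenizer and sampler) but swaps the hand-written forward pass for an AOTInductor-compiled shared object driven through `torch::inductor::AOTIModelContainerRunnerCpu`. As a rough orientation only, the sketch below shows how such a compiled `stories15M.so` could be loaded and run directly from libtorch; it is not part of this patch, and it assumes the PyTorch 2.2-era AOTI runner API (the header path and `run()` signature may differ between nightlies).

```cpp
// Hedged sketch, not part of this patch: drive an AOTInductor-compiled model
// (e.g. the stories15M.so produced by compile.py) directly from libtorch.
// Header path and runner class follow the PyTorch 2.2-era docs and may move
// around in other releases.
#include <torch/csrc/inductor/aoti_model_container_runner.h>
#include <torch/torch.h>

#include <iostream>
#include <vector>

int main() {
  c10::InferenceMode mode;

  // The path to the compiled model is assumed here for illustration only.
  torch::inductor::AOTIModelContainerRunnerCpu runner("stories15M.so");

  // A (1, seq_len) batch of int64 token ids, matching the example input used
  // for export in compile.py; 32000 is the llama2.c vocab size the handler
  // passes to build_transformer.
  std::vector<torch::Tensor> inputs{
      torch::randint(0, 32000, {1, 5}, torch::kLong)};

  // run() returns one tensor per model output; here the logits from which the
  // next token can be sampled.
  std::vector<torch::Tensor> outputs = runner.run(inputs);
  std::cout << "logits shape: " << outputs[0].sizes() << std::endl;
  return 0;
}
```

The llama2.so `run.cpp` pulled in as a submodule implements `forward()` on top of this runner (see the `Transformer` struct in the header that follows), so the tokenization and sampling code inherited from llama2.c can stay unchanged.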
diff --git a/examples/cpp/aot_inductor/src/llama2.so/llama2.hh b/examples/cpp/aot_inductor/src/llama2.so/llama2.hh new file mode 100644 index 0000000000..e0344462c7 --- /dev/null +++ b/examples/cpp/aot_inductor/src/llama2.so/llama2.hh @@ -0,0 +1,72 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// ---------------------------------------------------------------------------- +// Transformer model + +typedef struct { + int vocab_size; // vocabulary size, usually 256 (byte-level) + int seq_len; // max sequence length +} Config; + +typedef struct { + float *logits; // output logits + int64_t* toks; // tokens seen so far; no kv-cache :( +} RunState; + +typedef struct { + Config config; // the hyperparameters of the architecture (the blueprint) + RunState state; // buffers for the "wave" of activations in the forward pass + torch::inductor::AOTIModelContainerRunnerCpu *runner; +} Transformer; +// ---------------------------------------------------------------------------- +// The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens + +typedef struct { + char *str; + int id; +} TokenIndex; + +typedef struct { + char** vocab; + float* vocab_scores; + TokenIndex *sorted_vocab; + int vocab_size; + unsigned int max_token_length; + unsigned char byte_pieces[512]; // stores all single-byte strings +} Tokenizer; + +// ---------------------------------------------------------------------------- +// The Sampler, which takes logits and returns a sampled token +// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling + +typedef struct { + float prob; + int index; +} ProbIndex; // struct used when sorting probabilities during top-p sampling + +typedef struct { + int vocab_size; + ProbIndex* probindex; // buffer used in top-p sampling + float temperature; + float topp; + unsigned long long rng_state; +} Sampler; +void build_transformer(Transformer *t, char* checkpoint_path, int vocab_size, int seq_len); +void build_tokenizer(Tokenizer* t, char* tokenizer_path, int vocab_size); +void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed); +void encode(Tokenizer* t, char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens); +float* forward(Transformer* transformer, int token, int pos); +int sample(Sampler* sampler, float* logits); +long time_in_ms(); +char* decode(Tokenizer* t, int prev_token, int token); +void free_sampler(Sampler* sampler); +void free_tokenizer(Tokenizer* t); +void free_transformer(Transformer* t); diff --git a/examples/cpp/aot_inductor/src/llama_handler.cc b/examples/cpp/aot_inductor/src/llama_handler.cc new file mode 100644 index 0000000000..40f6c67b08 --- /dev/null +++ b/examples/cpp/aot_inductor/src/llama_handler.cc @@ -0,0 +1,302 @@ +#include "llama_handler.hh" + +#include +#include + +#include + +#include "llama2.so/llama2.hh" + + +namespace llm { + +Transformer transformer; +Tokenizer tokenizer; +Sampler sampler; +int steps = 256; + +std::pair, std::shared_ptr> +LlamaHandler::LoadModel( + std::shared_ptr &load_model_request) { + try { + auto device = GetTorchDevice(load_model_request); + + const std::string configFilePath = + fmt::format("{}/{}", load_model_request->model_dir, "config.json"); + std::string jsonContent; + if (!folly::readFile(configFilePath.c_str(), jsonContent)) { + std::cerr << "config.json not found at: " << configFilePath << std::endl; + throw; + } + folly::dynamic json; + json = folly::parseJson(jsonContent); + 
std::string checkpoint_path; + std::string tokenizer_path; + if (json.find("checkpoint_path") != json.items().end() && + json.find("tokenizer_path") != json.items().end()) { + checkpoint_path = json["checkpoint_path"].asString(); + tokenizer_path = json["tokenizer_path"].asString(); + } else { + std::cerr + << "Required fields 'model_name' and 'model_path' not found in JSON." + << std::endl; + throw; + } + + build_transformer(&transformer, const_cast(checkpoint_path.c_str()), 32000, 256); + + build_tokenizer(&tokenizer, const_cast(tokenizer_path.c_str()), + transformer.config.vocab_size); + + float temperature = + 1.0f; // 0.0 = greedy deterministic. 1.0 = original. don't set higher + float topp = 0.9f; // top-p in nucleus sampling. 1.0 = off. 0.9 works well, + // but slower + unsigned long long rng_seed(0); + // build the Sampler + build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, + rng_seed); + + return std::make_pair(nullptr, device); + } catch (const c10::Error &e) { + TS_LOGF(ERROR, "loading the model: {}, device id: {}, error: {}", + load_model_request->model_name, load_model_request->gpu_id, + e.msg()); + throw e; + } catch (const std::runtime_error &e) { + TS_LOGF(ERROR, "loading the model: {}, device id: {}, error: {}", + load_model_request->model_name, load_model_request->gpu_id, + e.what()); + throw e; + } +} + +c10::IValue LlamaHandler::Preprocess( + std::shared_ptr &device, + std::pair &> &idx_to_req_id, + std::shared_ptr &request_batch, + std::shared_ptr &response_batch) { + auto batch_ivalue = c10::impl::GenericList(torch::TensorType::get()); + std::vector batch_tensors; + uint8_t idx = 0; + for (auto &request : *request_batch) { + try { + (*response_batch)[request.request_id] = + std::make_shared(request.request_id); + idx_to_req_id.first += idx_to_req_id.first.empty() + ? 
request.request_id + : "," + request.request_id; + + auto data_it = request.parameters.find( + torchserve::PayloadType::kPARAMETER_NAME_DATA); + auto dtype_it = + request.headers.find(torchserve::PayloadType::kHEADER_NAME_DATA_TYPE); + if (data_it == request.parameters.end()) { + data_it = request.parameters.find( + torchserve::PayloadType::kPARAMETER_NAME_BODY); + dtype_it = request.headers.find( + torchserve::PayloadType::kHEADER_NAME_BODY_TYPE); + } + + if (data_it == request.parameters.end() || + dtype_it == request.headers.end()) { + TS_LOGF(ERROR, "Empty payload for request id: {}", request.request_id); + (*response_batch)[request.request_id]->SetResponse( + 500, "data_type", torchserve::PayloadType::kCONTENT_TYPE_TEXT, + "Empty payload"); + continue; + } + + std::string msg = torchserve::Converter::VectorToStr(data_it->second); + + int num_prompt_tokens = 0; + + std::unique_ptr msgCStr( + new char[msg.size() + 1], [](char *ptr) { delete[] ptr; }); + + std::strcpy(msgCStr.get(), msg.c_str()); + + std::unique_ptr prompt_tokens(new int[msg.length() + 3]); + + encode(&tokenizer, msgCStr.get(), 1, 0, prompt_tokens.get(), + &num_prompt_tokens); + + std::vector tensor_vector; + for (int64_t i = 0; i < num_prompt_tokens; ++i) { + int token = prompt_tokens[i]; + torch::Tensor tensor = torch::tensor(token, torch::kInt64); + tensor_vector.push_back(tensor); + } + batch_ivalue.emplace_back(torch::stack(tensor_vector)); + + idx_to_req_id.second[idx++] = request.request_id; + } catch (const std::runtime_error &e) { + TS_LOGF(ERROR, "Failed to load tensor for request id: {}, error: {}", + request.request_id, e.what()); + auto response = (*response_batch)[request.request_id]; + response->SetResponse(500, "data_type", + torchserve::PayloadType::kDATA_TYPE_STRING, + "runtime_error, failed to load tensor"); + } catch (const c10::Error &e) { + TS_LOGF(ERROR, "Failed to load tensor for request id: {}, c10 error:{}", + request.request_id, e.msg()); + auto response = (*response_batch)[request.request_id]; + response->SetResponse(500, "data_type", + torchserve::PayloadType::kDATA_TYPE_STRING, + "c10 error, failed to load tensor"); + } + } + + return batch_ivalue; +} + +c10::IValue LlamaHandler::Inference( + std::shared_ptr model, c10::IValue &inputs, + std::shared_ptr &device, + std::pair &> &idx_to_req_id, + std::shared_ptr &response_batch) { + torch::InferenceMode guard; + auto batch_output_vector = c10::impl::GenericList(torch::TensorType::get()); + long batch_token_length = 0; + long start = + 0; // used to time our code, only initialized after first iteration + + try { + for (auto input : inputs.toTensorList()) { + std::vector tensor_vector; + tensor_vector.reserve(steps); + torch::Tensor tokens_list_tensor = input.get().toTensor(); + + int64_t num_elements = tokens_list_tensor.numel(); + + int64_t *data_ptr = tokens_list_tensor.data_ptr(); + + std::unique_ptr prompt_tokens(new int[num_elements]); + + for (int64_t i = 0; i < num_elements; ++i) { + prompt_tokens[i] = data_ptr[i]; + } + + // start the main loop + int next; // will store the next token in the sequence + int token = + prompt_tokens[0]; // kick off with the first token in the prompt + int pos = 0; // position in the sequence + while (pos < steps) { + // forward the transformer to get logits for the next token + float *logits = forward(&transformer, token, pos); + + // advance the state state machine + if (pos < num_elements - 1) { + // if we are still processing the input prompt, force the next prompt + // token + next = prompt_tokens[pos 
+ 1]; + } else { + // otherwise sample the next token from the logits + next = sample(&sampler, logits); + } + pos++; + + torch::Tensor tensor = torch::tensor(next, torch::kLong); + tensor_vector.push_back(tensor); + + // data-dependent terminating condition: the BOS (=1) token delimits + // sequences + if (next == 1) { + break; + } + token = next; + + // init the timer here because the first iteration can be slower + if (start == 0) { + start = time_in_ms(); + } + } + batch_token_length = batch_token_length + pos - 1; + + torch::Tensor stacked_tensor = torch::stack(tensor_vector); + + batch_output_vector.push_back(stacked_tensor); + } + + TS_LOGF(DEBUG, "Total number of tokens generated: {}", batch_token_length); + if (batch_token_length > 1) { + long end = time_in_ms(); + double token_per_sec = batch_token_length / (double)(end - start) * 1000; + TS_LOGF(DEBUG, "Achieved tok per sec: {}", token_per_sec); + } + } catch (std::runtime_error &e) { + TS_LOG(ERROR, e.what()); + } catch (const c10::Error &e) { + TS_LOGF(ERROR, "Failed to apply inference on input, c10 error:{}", e.msg()); + } catch (...) { + TS_LOG(ERROR, "Failed to run inference on this batch"); + } + return batch_output_vector; +} + +void LlamaHandler::Postprocess( + c10::IValue &outputs, + std::pair &> &idx_to_req_id, + std::shared_ptr &response_batch) { + auto data = outputs.toTensorList(); + for (const auto &kv : idx_to_req_id.second) { + try { + int64_t num_elements = data[kv.first].get().toTensor().numel(); + int64_t *data_ptr = data[kv.first].get().toTensor().data_ptr(); + int64_t token = 1; + std::string concatenated_string; + for (int64_t i = 0; i < num_elements; ++i) { + char *piece = decode(&tokenizer, token, data_ptr[i]); + std::string piece_string(piece); + token = data_ptr[i]; + concatenated_string += piece_string; + } + + TS_LOGF(DEBUG, "Generated String: {}", concatenated_string); + + auto response = (*response_batch)[kv.second]; + + response->SetResponse(200, "data_type", + torchserve::PayloadType::kDATA_TYPE_STRING, + concatenated_string); + } catch (const std::runtime_error &e) { + TS_LOGF(ERROR, "Failed to load tensor for request id: {}, error: {}", + kv.second, e.what()); + auto response = (*response_batch)[kv.second]; + response->SetResponse(500, "data_type", + torchserve::PayloadType::kDATA_TYPE_STRING, + "runtime_error, failed to postprocess tensor"); + } catch (const c10::Error &e) { + TS_LOGF(ERROR, + "Failed to postprocess tensor for request id: {}, error: {}", + kv.second, e.msg()); + auto response = (*response_batch)[kv.second]; + response->SetResponse(500, "data_type", + torchserve::PayloadType::kDATA_TYPE_STRING, + "c10 error, failed to postprocess tensor"); + } + } +} + +LlamaHandler::~LlamaHandler() noexcept { + free_sampler(&sampler); + free_tokenizer(&tokenizer); + free_transformer(&transformer); +} + +} // namespace llm + +#if defined(__linux__) || defined(__APPLE__) +extern "C" { +torchserve::BaseHandler *allocatorLlamaHandler() { + return new llm::LlamaHandler(); +} + +void deleterLlamaHandler(torchserve::BaseHandler *p) { + if (p != nullptr) { + delete static_cast(p); + } +} +} +#endif diff --git a/examples/cpp/aot_inductor/src/llama_handler.hh b/examples/cpp/aot_inductor/src/llama_handler.hh new file mode 100644 index 0000000000..7c2c067acc --- /dev/null +++ b/examples/cpp/aot_inductor/src/llama_handler.hh @@ -0,0 +1,41 @@ +#pragma once + +#include + +#include "src/backends/handler/base_handler.hh" + +namespace llm { +class LlamaHandler : public torchserve::BaseHandler { + public: + // 
NOLINTBEGIN(bugprone-exception-escape) + LlamaHandler() = default; + // NOLINTEND(bugprone-exception-escape) + ~LlamaHandler() noexcept; + + void initialize_context(); + + std::pair, std::shared_ptr> LoadModel( + std::shared_ptr& load_model_request) + override; + + c10::IValue Preprocess( + std::shared_ptr& device, + std::pair&>& idx_to_req_id, + std::shared_ptr& request_batch, + std::shared_ptr& response_batch) + override; + + c10::IValue Inference( + std::shared_ptr model, c10::IValue& inputs, + std::shared_ptr& device, + std::pair&>& idx_to_req_id, + std::shared_ptr& response_batch) + override; + + void Postprocess( + c10::IValue& data, + std::pair&>& idx_to_req_id, + std::shared_ptr& response_batch) + override; +}; +} // namespace llm From fb35caa644109145cae7e59ce7a7144a388fb8df Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Tue, 30 Jan 2024 19:05:15 +0000 Subject: [PATCH 03/11] Temporarilly point llama2.so to a fork --- .gitmodules | 2 +- cpp/third-party/llama2.so | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 4a00ef021e..3125a3b997 100644 --- a/.gitmodules +++ b/.gitmodules @@ -9,4 +9,4 @@ url = https://github.com/karpathy/llama2.c [submodule "cpp/third-party/llama2.so"] path = cpp/third-party/llama2.so - url = https://github.com/bertmaher/llama2.so.git + url = https://github.com/mreso/llama2.so.git diff --git a/cpp/third-party/llama2.so b/cpp/third-party/llama2.so index d07f694bb4..f867d9dd72 160000 --- a/cpp/third-party/llama2.so +++ b/cpp/third-party/llama2.so @@ -1 +1 @@ -Subproject commit d07f694bb43cf990cdc08edbf1643113d76dc80e +Subproject commit f867d9dd7293525ad6c69f6981dbbce81849688a From 22f9814212c3dc62dee327622d239944a566f7b2 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Tue, 30 Jan 2024 19:22:34 +0000 Subject: [PATCH 04/11] Move llama2.so back to original repo --- .gitmodules | 2 +- cpp/third-party/llama2.so | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index 3125a3b997..4a00ef021e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -9,4 +9,4 @@ url = https://github.com/karpathy/llama2.c [submodule "cpp/third-party/llama2.so"] path = cpp/third-party/llama2.so - url = https://github.com/mreso/llama2.so.git + url = https://github.com/bertmaher/llama2.so.git diff --git a/cpp/third-party/llama2.so b/cpp/third-party/llama2.so index f867d9dd72..e957cf13a9 160000 --- a/cpp/third-party/llama2.so +++ b/cpp/third-party/llama2.so @@ -1 +1 @@ -Subproject commit f867d9dd7293525ad6c69f6981dbbce81849688a +Subproject commit e957cf13a910658e7c01819566de0ddcd958a6f0 From 002b9028333dcbee087c17a6ea68a279679074de Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Wed, 31 Jan 2024 05:39:03 +0000 Subject: [PATCH 05/11] Move creation of test resources and build files under _build instead of the source tree --- cpp/build.sh | 65 ++++++++++++------- cpp/src/examples/CMakeLists.txt | 10 +-- cpp/test/examples/examples_test.cc | 21 ++++-- .../aot_inductor/llama_handler/config.json | 4 +- .../babyllama/babyllama_handler/config.json | 4 +- .../llamacpp/llamacpp_handler/config.json | 3 + .../torch_scripted/torch_scripted_test.cc | 33 +++++----- 7 files changed, 87 insertions(+), 53 deletions(-) create mode 100644 cpp/test/resources/examples/llamacpp/llamacpp_handler/config.json diff --git a/cpp/build.sh b/cpp/build.sh index 0ad5e9c5d5..d7782abaae 100755 --- a/cpp/build.sh 
+++ b/cpp/build.sh @@ -74,28 +74,37 @@ function install_kineto() { } function install_libtorch() { + TORCH_VERSION="2.1.1" if [ "$PLATFORM" = "Mac" ]; then echo -e "${COLOR_GREEN}[ INFO ] Skip install libtorch on Mac ${COLOR_OFF}" - elif [ ! -d "$DEPS_DIR/libtorch" ] ; then - cd "$DEPS_DIR" || exit - if [ "$PLATFORM" = "Linux" ]; then + elif [ "$PLATFORM" = "Windows" ]; then + echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Windows ${COLOR_OFF}" + # TODO: Windows + echo -e "${COLOR_RED}[ ERROR ] Unknown platform: $PLATFORM ${COLOR_OFF}" + exit 1 + else # Linux + if [ -d "$DEPS_DIR/libtorch" ]; then + RAW_VERSION=`cat "$DEPS_DIR/libtorch/build-version"` + VERSION=`cat "$DEPS_DIR/libtorch/build-version" | cut -d "+" -f 1` + if [ "$USE_NIGHTLIES" = "true" ] && [[ ! "${RAW_VERSION}" =~ .*"dev".* ]]; then + rm -rf "$DEPS_DIR/libtorch" + elif [ "$USE_NIGHTLIES" == "" ] && [ "$VERSION" != "$TORCH_VERSION" ]; then + rm -rf "$DEPS_DIR/libtorch" + fi + fi + if [ ! -d "$DEPS_DIR/libtorch" ]; then + cd "$DEPS_DIR" || exit echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Linux ${COLOR_OFF}" - if [ "$USE_NIGHTLIES" == true ] ; then + if [ "$USE_NIGHTLIES" == true ]; then URL=https://download.pytorch.org/libtorch/nightly/${CUDA}/libtorch-cxx11-abi-shared-with-deps-latest.zip else - URL=https://download.pytorch.org/libtorch/${CUDA}/libtorch-cxx11-abi-shared-with-deps-2.1.1%2B${CUDA}.zip + URL=https://download.pytorch.org/libtorch/${CUDA}/libtorch-cxx11-abi-shared-with-deps-${TORCH_VERSION}%2B${CUDA}.zip fi wget $URL ZIP_FILE=$(basename "$URL") ZIP_FILE="${ZIP_FILE//%2B/+}" unzip $ZIP_FILE rm $ZIP_FILE - - elif [ "$PLATFORM" = "Windows" ]; then - echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Windows ${COLOR_OFF}" - # TODO: Windows - echo -e "${COLOR_RED}[ ERROR ] Unknown platform: $PLATFORM ${COLOR_OFF}" - exit 1 fi echo -e "${COLOR_GREEN}[ INFO ] libtorch is installed ${COLOR_OFF}" fi @@ -143,21 +152,22 @@ function build_llama_cpp() { } function prepare_test_files() { - local R_DIR="${BASE_DIR}/test/resources/examples/" - if [ ! -f "${R_DIR}/babyllama/babyllama_handler/tokenizer.bin" ]; then - wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin -O "${R_DIR}/babyllama/babyllama_handler/tokenizer.bin" + echo -e "${COLOR_GREEN}[ INFO ]Preparing test files ${COLOR_OFF}" + local EX_DIR="${TR_DIR}/examples/" + rsync -a --link-dest=../../test/resources/ ${BASE_DIR}/test/resources/ ${TR_DIR}/ + if [ ! -f "${EX_DIR}/babyllama/babyllama_handler/tokenizer.bin" ]; then + wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin -O "${EX_DIR}/babyllama/babyllama_handler/tokenizer.bin" fi - if [ ! -f "${R_DIR}/babyllama/babyllama_handler/stories15M.bin" ]; then - wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin -O "${R_DIR}/babyllama/babyllama_handler/stories15M.bin" + if [ ! -f "${EX_DIR}/babyllama/babyllama_handler/stories15M.bin" ]; then + wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin -O "${EX_DIR}/babyllama/babyllama_handler/stories15M.bin" fi - if [ ! -f "${R_DIR}/aot_inductor/llama_handler/stories15M.so" ] && [ "$USE_NIGHTLIES" == true ]; then - local L_DIR=${R_DIR}/aot_inductor/llama_handler/ - if [ ! -f "${L_DIR}/stories15M.pt" ]; then - wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt?download=true -O "${L_DIR}/stories15M.pt" + if [ ! 
-f "${EX_DIR}/aot_inductor/llama_handler/stories15M.so" ] && [ "$USE_NIGHTLIES" == true ]; then + local HANDLER_DIR=${EX_DIR}/aot_inductor/llama_handler/ + if [ ! -f "${HANDLER_DIR}/stories15M.pt" ]; then + wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt?download=true -O "${HANDLER_DIR}/stories15M.pt" fi - local LLAMA_SO_DIR=${BASE_DIR}/third-party/llama2_so/ - # touch ${LLAMA_SO_DIR}/llama2_so/__init__.py - PYTHONPATH=${LLAMA_SO_DIR}:${PYTHONPATH} python ${BASE_DIR}/../examples/cpp/aot_inductor/compile.py --checkpoint ${L_DIR}/stories15M.pt ${L_DIR}/stories15M.so + local LLAMA_SO_DIR=${BASE_DIR}/third-party/llama2.so/ + PYTHONPATH=${LLAMA_SO_DIR}:${PYTHONPATH} python ${BASE_DIR}/../examples/cpp/aot_inductor/compile.py --checkpoint ${HANDLER_DIR}/stories15M.pt ${HANDLER_DIR}/stories15M.so fi } @@ -185,6 +195,11 @@ function build() { MAYBE_CUDA_COMPILER='-DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc' fi + MAYBE_NIGHTLIES="-Dnightlies=OFF" + if [ "$USE_NIGHTLIES" == true ]; then + MAYBE_NIGHTLIES="-Dnightlies=ON" + fi + # Build torchserve_cpp with cmake cd "$BWD" || exit YAML_CPP_CMAKE_DIR=$DEPS_DIR/yaml-cpp-build @@ -201,6 +216,7 @@ function build() { "$MAYBE_USE_STATIC_DEPS" \ "$MAYBE_LIB_FUZZING_ENGINE" \ "$MAYBE_CUDA_COMPILER" \ + "$MAYBE_NIGHTLIES" \ .. if [ "$CUDA" = "cu118" ] || [ "$CUDA" = "cu121" ]; then @@ -216,6 +232,7 @@ function build() { "$MAYBE_OVERRIDE_CXX_FLAGS" \ "$MAYBE_USE_STATIC_DEPS" \ "$MAYBE_LIB_FUZZING_ENGINE" \ + "$MAYBE_NIGHTLIES" \ .. export LIBRARY_PATH=${LIBRARY_PATH}:/usr/local/opt/icu4c/lib @@ -323,8 +340,10 @@ cd $BUILD_DIR || exit BWD=$(pwd) DEPS_DIR=$BWD/_deps LIBS_DIR=$BWD/libs +TR_DIR=$BWD/test/resources/ mkdir -p "$DEPS_DIR" mkdir -p "$LIBS_DIR" +mkdir -p "$TR_DIR" # Must execute from the directory containing this script cd $BASE_DIR diff --git a/cpp/src/examples/CMakeLists.txt b/cpp/src/examples/CMakeLists.txt index c7308cdcd6..0edbf26af8 100644 --- a/cpp/src/examples/CMakeLists.txt +++ b/cpp/src/examples/CMakeLists.txt @@ -1,8 +1,10 @@ -add_subdirectory("../../../examples/cpp/babyllama/" "../../../test/resources/examples/babyllama/babyllama_handler/") +add_subdirectory("../../../examples/cpp/babyllama/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/babyllama/babyllama_handler/") -add_subdirectory("../../../examples/cpp/aot_inductor/" "../../../test/resources/examples/aot_inductor/llama_handler/") +if(nightlies) + add_subdirectory("../../../examples/cpp/aot_inductor/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/aot_inductor/llama_handler/") +endif() -add_subdirectory("../../../examples/cpp/llamacpp/" "../../../test/resources/examples/llamacpp/llamacpp_handler/") +add_subdirectory("../../../examples/cpp/llamacpp/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/llamacpp/llamacpp_handler/") -add_subdirectory("../../../examples/cpp/mnist/" "../../../test/resources/examples/mnist/mnist_handler/") +add_subdirectory("../../../examples/cpp/mnist/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/mnist/mnist_handler/") diff --git a/cpp/test/examples/examples_test.cc b/cpp/test/examples/examples_test.cc index bb3c4d998c..518cc51158 100644 --- a/cpp/test/examples/examples_test.cc +++ b/cpp/test/examples/examples_test.cc @@ -1,9 +1,11 @@ +#include + #include #include "test/utils/common.hh" TEST_F(ModelPredictTest, TestLoadPredictBabyLlamaHandler) { - std::string base_dir = "test/resources/examples/babyllama/"; + std::string base_dir = "_build/test/resources/examples/babyllama/"; 
std::string file1 = base_dir + "babyllama_handler/stories15M.bin"; std::string file2 = base_dir + "babyllama_handler/tokenizer.bin"; @@ -22,15 +24,20 @@ TEST_F(ModelPredictTest, TestLoadPredictBabyLlamaHandler) { } TEST_F(ModelPredictTest, TestLoadPredictAotInductorLlamaHandler) { - std::string base_dir = "test/resources/examples/aot_inductor/"; + std::string base_dir = "_build/test/resources/examples/aot_inductor/"; std::string file1 = base_dir + "llama_handler/stories15M.so"; std::string file2 = - "test/resources/examples/babyllama/babyllama_handler/tokenizer.bin"; + "_build/test/resources/examples/babyllama/babyllama_handler/" + "tokenizer.bin"; std::ifstream f1(file1); std::ifstream f2(file2); + if (TORCH_VERSION_MAJOR < 2 || + (TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR < 3)) + GTEST_SKIP() << "Skipping TestLoadPredictAotInductorLlamaHandler because " + "it needs at least libtorch version >=2.3.0"; - if (!f1.good() && !f2.good()) + if (!f1.good() || !f2.good()) GTEST_SKIP() << "Skipping TestLoadPredictAotInductorLlamaHandler because " "of missing files: " << file1 << " or " << file2; @@ -41,14 +48,14 @@ TEST_F(ModelPredictTest, TestLoadPredictAotInductorLlamaHandler) { base_dir + "llama_handler", base_dir + "prompt.txt", "llm_ts", 200); } -TEST_F(ModelPredictTest, TestLoadPredictLlmHandler) { - std::string base_dir = "test/resources/examples/llamacpp/"; +TEST_F(ModelPredictTest, TestLoadPredictLlamaCppHandler) { + std::string base_dir = "_build/test/resources/examples/llamacpp/"; std::string file1 = base_dir + "llamacpp_handler/llama-2-7b-chat.Q5_0.gguf"; std::ifstream f(file1); if (!f.good()) GTEST_SKIP() - << "Skipping TestLoadPredictLlmHandler because of missing file: " + << "Skipping TestLoadPredictLlamaCppHandler because of missing file: " << file1; this->LoadPredict( diff --git a/cpp/test/resources/examples/aot_inductor/llama_handler/config.json b/cpp/test/resources/examples/aot_inductor/llama_handler/config.json index a55d1bf7dc..04e1cd48ee 100644 --- a/cpp/test/resources/examples/aot_inductor/llama_handler/config.json +++ b/cpp/test/resources/examples/aot_inductor/llama_handler/config.json @@ -1,4 +1,4 @@ { -"checkpoint_path" : "test/resources/examples/aot_inductor/llama_handler/stories15M.so", -"tokenizer_path" : "test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" +"checkpoint_path" : "_build/test/resources/examples/aot_inductor/llama_handler/stories15M.so", +"tokenizer_path" : "_build/test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" } diff --git a/cpp/test/resources/examples/babyllama/babyllama_handler/config.json b/cpp/test/resources/examples/babyllama/babyllama_handler/config.json index f75cd1fb53..c88e48143b 100644 --- a/cpp/test/resources/examples/babyllama/babyllama_handler/config.json +++ b/cpp/test/resources/examples/babyllama/babyllama_handler/config.json @@ -1,4 +1,4 @@ { -"checkpoint_path" : "test/resources/examples/babyllama/babyllama_handler/stories15M.bin", -"tokenizer_path" : "test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" +"checkpoint_path" : "_build/test/resources/examples/babyllama/babyllama_handler/stories15M.bin", +"tokenizer_path" : "_build/test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" } diff --git a/cpp/test/resources/examples/llamacpp/llamacpp_handler/config.json b/cpp/test/resources/examples/llamacpp/llamacpp_handler/config.json new file mode 100644 index 0000000000..46169be4ea --- /dev/null +++ b/cpp/test/resources/examples/llamacpp/llamacpp_handler/config.json @@ -0,0 
+1,3 @@ +{ + "checkpoint_path" : "_build/test/resources/examples/llamacpp/llamacpp_handler/llama-2-7b-chat.Q5_0.gguf" +} diff --git a/cpp/test/torch_scripted/torch_scripted_test.cc b/cpp/test/torch_scripted/torch_scripted_test.cc index ecb1d7f69f..5f1c986151 100644 --- a/cpp/test/torch_scripted/torch_scripted_test.cc +++ b/cpp/test/torch_scripted/torch_scripted_test.cc @@ -9,44 +9,47 @@ TEST_F(ModelPredictTest, TestLoadPredictBaseHandler) { this->LoadPredict(std::make_shared( - "test/resources/examples/mnist/mnist_handler", + "_build/test/resources/examples/mnist/mnist_handler", "mnist_scripted_v2", -1, "", "", 1, false), - "test/resources/examples/mnist/base_handler", - "test/resources/examples/mnist/0_png.pt", "mnist_ts", 200); + "_build/test/resources/examples/mnist/base_handler", + "_build/test/resources/examples/mnist/0_png.pt", "mnist_ts", + 200); } TEST_F(ModelPredictTest, TestLoadPredictMnistHandler) { this->LoadPredict(std::make_shared( - "test/resources/examples/mnist/mnist_handler", + "_build/test/resources/examples/mnist/mnist_handler", "mnist_scripted_v2", -1, "", "", 1, false), - "test/resources/examples/mnist/mnist_handler", - "test/resources/examples/mnist/0_png.pt", "mnist_ts", 200); + "_build/test/resources/examples/mnist/mnist_handler", + "_build/test/resources/examples/mnist/0_png.pt", "mnist_ts", + 200); } TEST_F(ModelPredictTest, TestBackendInitWrongModelDir) { - auto result = backend_->Initialize("test/resources/examples/mnist"); + auto result = backend_->Initialize("_build/test/resources/examples/mnist"); ASSERT_EQ(result, false); } TEST_F(ModelPredictTest, TestBackendInitWrongHandler) { - auto result = - backend_->Initialize("test/resources/examples/mnist/wrong_handler"); + auto result = backend_->Initialize( + "_build/test/resources/examples/mnist/wrong_handler"); ASSERT_EQ(result, false); } TEST_F(ModelPredictTest, TestLoadModelFailure) { - backend_->Initialize("test/resources/examples/mnist/wrong_model"); + backend_->Initialize("_build/test/resources/examples/mnist/wrong_model"); auto result = backend_->LoadModel(std::make_shared( - "test/resources/examples/mnist/wrong_model", "mnist_scripted_v2", -1, - "", "", 1, false)); + "_build/test/resources/examples/mnist/wrong_model", + "mnist_scripted_v2", -1, "", "", 1, false)); ASSERT_EQ(result->code, 500); } TEST_F(ModelPredictTest, TestLoadPredictMnistHandlerFailure) { this->LoadPredict(std::make_shared( - "test/resources/examples/mnist/mnist_handler", + "_build/test/resources/examples/mnist/mnist_handler", "mnist_scripted_v2", -1, "", "", 1, false), - "test/resources/examples/mnist/mnist_handler", - "test/resources/examples/mnist/0.png", "mnist_ts", 500); + "_build/test/resources/examples/mnist/mnist_handler", + "_build/test/resources/examples/mnist/0.png", "mnist_ts", + 500); } From 32f24155bf0c992e2dd62a0a1d82339fb9ac7c80 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Wed, 31 Jan 2024 05:52:43 +0000 Subject: [PATCH 06/11] Move aot_inductor example under llama2 subfolder --- cpp/build.sh | 2 +- cpp/src/examples/CMakeLists.txt | 2 +- examples/cpp/aot_inductor/{ => llama2}/CMakeLists.txt | 2 +- examples/cpp/aot_inductor/{ => llama2}/README.md | 0 examples/cpp/aot_inductor/{ => llama2}/compile.py | 0 examples/cpp/aot_inductor/{ => llama2}/config.json | 0 examples/cpp/aot_inductor/{ => llama2}/prompt1.txt | 0 examples/cpp/aot_inductor/{ => llama2}/prompt2.txt | 0 examples/cpp/aot_inductor/{ => llama2}/src/llama2.so/LICENSE | 0 examples/cpp/aot_inductor/{ => 
llama2}/src/llama2.so/llama2.hh | 0 examples/cpp/aot_inductor/{ => llama2}/src/llama_handler.cc | 0 examples/cpp/aot_inductor/{ => llama2}/src/llama_handler.hh | 0 12 files changed, 3 insertions(+), 3 deletions(-) rename examples/cpp/aot_inductor/{ => llama2}/CMakeLists.txt (75%) rename examples/cpp/aot_inductor/{ => llama2}/README.md (100%) rename examples/cpp/aot_inductor/{ => llama2}/compile.py (100%) rename examples/cpp/aot_inductor/{ => llama2}/config.json (100%) rename examples/cpp/aot_inductor/{ => llama2}/prompt1.txt (100%) rename examples/cpp/aot_inductor/{ => llama2}/prompt2.txt (100%) rename examples/cpp/aot_inductor/{ => llama2}/src/llama2.so/LICENSE (100%) rename examples/cpp/aot_inductor/{ => llama2}/src/llama2.so/llama2.hh (100%) rename examples/cpp/aot_inductor/{ => llama2}/src/llama_handler.cc (100%) rename examples/cpp/aot_inductor/{ => llama2}/src/llama_handler.hh (100%) diff --git a/cpp/build.sh b/cpp/build.sh index d7782abaae..2ca82730fa 100755 --- a/cpp/build.sh +++ b/cpp/build.sh @@ -167,7 +167,7 @@ function prepare_test_files() { wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt?download=true -O "${HANDLER_DIR}/stories15M.pt" fi local LLAMA_SO_DIR=${BASE_DIR}/third-party/llama2.so/ - PYTHONPATH=${LLAMA_SO_DIR}:${PYTHONPATH} python ${BASE_DIR}/../examples/cpp/aot_inductor/compile.py --checkpoint ${HANDLER_DIR}/stories15M.pt ${HANDLER_DIR}/stories15M.so + PYTHONPATH=${LLAMA_SO_DIR}:${PYTHONPATH} python ${BASE_DIR}/../examples/cpp/aot_inductor/llama2/compile.py --checkpoint ${HANDLER_DIR}/stories15M.pt ${HANDLER_DIR}/stories15M.so fi } diff --git a/cpp/src/examples/CMakeLists.txt b/cpp/src/examples/CMakeLists.txt index 0edbf26af8..58b8be78fa 100644 --- a/cpp/src/examples/CMakeLists.txt +++ b/cpp/src/examples/CMakeLists.txt @@ -2,7 +2,7 @@ add_subdirectory("../../../examples/cpp/babyllama/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/babyllama/babyllama_handler/") if(nightlies) - add_subdirectory("../../../examples/cpp/aot_inductor/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/aot_inductor/llama_handler/") + add_subdirectory("../../../examples/cpp/aot_inductor/llama2/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/aot_inductor/llama_handler/") endif() add_subdirectory("../../../examples/cpp/llamacpp/" "${CMAKE_CURRENT_BINARY_DIR}/../../test/resources/examples/llamacpp/llamacpp_handler/") diff --git a/examples/cpp/aot_inductor/CMakeLists.txt b/examples/cpp/aot_inductor/llama2/CMakeLists.txt similarity index 75% rename from examples/cpp/aot_inductor/CMakeLists.txt rename to examples/cpp/aot_inductor/llama2/CMakeLists.txt index 5d50a0710a..1826330c83 100644 --- a/examples/cpp/aot_inductor/CMakeLists.txt +++ b/examples/cpp/aot_inductor/llama2/CMakeLists.txt @@ -1,4 +1,4 @@ -add_library(llama2_so STATIC ../../../cpp/third-party/llama2.so/run.cpp) +add_library(llama2_so STATIC ../../../../cpp/third-party/llama2.so/run.cpp) target_compile_options(llama2_so PRIVATE -Wall -Wextra -Ofast -fpermissive) add_library(llama_so_handler SHARED src/llama_handler.cc) diff --git a/examples/cpp/aot_inductor/README.md b/examples/cpp/aot_inductor/llama2/README.md similarity index 100% rename from examples/cpp/aot_inductor/README.md rename to examples/cpp/aot_inductor/llama2/README.md diff --git a/examples/cpp/aot_inductor/compile.py b/examples/cpp/aot_inductor/llama2/compile.py similarity index 100% rename from examples/cpp/aot_inductor/compile.py rename to examples/cpp/aot_inductor/llama2/compile.py diff --git 
a/examples/cpp/aot_inductor/config.json b/examples/cpp/aot_inductor/llama2/config.json similarity index 100% rename from examples/cpp/aot_inductor/config.json rename to examples/cpp/aot_inductor/llama2/config.json diff --git a/examples/cpp/aot_inductor/prompt1.txt b/examples/cpp/aot_inductor/llama2/prompt1.txt similarity index 100% rename from examples/cpp/aot_inductor/prompt1.txt rename to examples/cpp/aot_inductor/llama2/prompt1.txt diff --git a/examples/cpp/aot_inductor/prompt2.txt b/examples/cpp/aot_inductor/llama2/prompt2.txt similarity index 100% rename from examples/cpp/aot_inductor/prompt2.txt rename to examples/cpp/aot_inductor/llama2/prompt2.txt diff --git a/examples/cpp/aot_inductor/src/llama2.so/LICENSE b/examples/cpp/aot_inductor/llama2/src/llama2.so/LICENSE similarity index 100% rename from examples/cpp/aot_inductor/src/llama2.so/LICENSE rename to examples/cpp/aot_inductor/llama2/src/llama2.so/LICENSE diff --git a/examples/cpp/aot_inductor/src/llama2.so/llama2.hh b/examples/cpp/aot_inductor/llama2/src/llama2.so/llama2.hh similarity index 100% rename from examples/cpp/aot_inductor/src/llama2.so/llama2.hh rename to examples/cpp/aot_inductor/llama2/src/llama2.so/llama2.hh diff --git a/examples/cpp/aot_inductor/src/llama_handler.cc b/examples/cpp/aot_inductor/llama2/src/llama_handler.cc similarity index 100% rename from examples/cpp/aot_inductor/src/llama_handler.cc rename to examples/cpp/aot_inductor/llama2/src/llama_handler.cc diff --git a/examples/cpp/aot_inductor/src/llama_handler.hh b/examples/cpp/aot_inductor/llama2/src/llama_handler.hh similarity index 100% rename from examples/cpp/aot_inductor/src/llama_handler.hh rename to examples/cpp/aot_inductor/llama2/src/llama_handler.hh From c8b35f2087a95ec474101c248cd26dc863ac26f0 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Wed, 31 Jan 2024 17:53:01 +0000 Subject: [PATCH 07/11] Update aot inductor cpp example docs --- examples/cpp/aot_inductor/llama2/README.md | 36 +++++++++++--------- examples/cpp/aot_inductor/llama2/config.json | 4 +-- examples/cpp/babyllama/README.md | 4 ++- examples/cpp/llamacpp/README.md | 4 ++- 4 files changed, 27 insertions(+), 21 deletions(-) diff --git a/examples/cpp/aot_inductor/llama2/README.md b/examples/cpp/aot_inductor/llama2/README.md index cba4df5cd5..d04d917fe2 100644 --- a/examples/cpp/aot_inductor/llama2/README.md +++ b/examples/cpp/aot_inductor/llama2/README.md @@ -1,44 +1,46 @@ -This example is adapted from https://github.com/karpathy/llama2.c. The handler C++ source code for this examples can be found [here](../../../cpp/src/examples/babyllama/). +This example uses Bert Maher's [llama2.so](https://github.com/bertmaher/llama2.so/) which is a fork of Andrej Karpathy's [llama2.c](https://github.com/karpathy/llama2.c). +It uses AOTInductor to compile the model into an so file which is then executed using libtorch. +The handler C++ source code for this examples can be found [here](src/). ### Setup -1. Follow the instructions in [README.md](../../../cpp/README.md) to build the TorchServe C++ backend. +1. Follow the instructions in [README.md](../../../../cpp/README.md) to build the TorchServe C++ backend. +Currently, this example required the availability of Pytorch nightlies, so make sure you have that installed. +To build the cpp backend with the newest nightlies of libtorch on Linux we can add the -n flag to the build script. ``` cd serve/cpp -./builld.sh +./builld.sh -n ``` -2. 
Download the model and tokenizer using the following command +The build script will already create the necessary artifact for this example. +To recreate these by hand you can follow the prepare_test_files function of the [build.sh](../../../../cpp/build.sh) script. +We will need the handler .so file as well as the stories15M.so file containing the model and weights. + +2. Copy the handler file ```bash -cd ~/serve/examples/cpp/babyllama -wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin -wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin +cd ~/serve/examples/cpp/aot_inductor/llama2 +cp ../../../../cpp/_build/test/resources/examples/aot_inductor/llama_handler/libllama_so_handler.so ./ ``` +We will leave the model .so file in place and just use its [path](../../../../cpp/_build/test/resources/examples/aot_inductor/llama_handler/stories15M.so) in the next step. 4. Create a [config.json](config.json) with the path of the downloaded model and tokenizer: ```bash echo '{ -"checkpoint_path" : "/home/ubuntu/serve/examples/cpp/babyllama/stories15M.bin", -"tokenizer_path" : "/home/ubuntu/serve/examples/cpp/babyllama/tokenizer.bin" +"checkpoint_path" : "/home/ubuntu/serve/cpp/_build/test/resources/examples/aot_inductor/llama_handler/stories15M.so", +"tokenizer_path" : "/home/ubuntu/serve/cpp/_build/test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" }' > config.json ``` -5. Copy handle .so file - -While building the C++ backend the `libbabyllama_handler.so` file is generated in the [babyllama_handler](../../../cpp/test/resources/examples/babyllama/babyllama_handler) folder. - -```bash -cp ../../../cpp/test/resources/examples/babyllama/babyllama_handler/libbabyllama_handler.so ./ -``` +The tokenizer is the same we also use for the babyllama example so we can reuse the file from there. ### Generate MAR file Now lets generate the mar file ```bash -torch-model-archiver --model-name llm --version 1.0 --handler libbabyllama_handler:BabyLlamaHandler --runtime LSP --extra-files config.json +torch-model-archiver --model-name llm --version 1.0 --handler libllama_so_handler:LlamaHandler --runtime LSP --extra-files config.json ``` Create model store directory and move the mar file diff --git a/examples/cpp/aot_inductor/llama2/config.json b/examples/cpp/aot_inductor/llama2/config.json index d2a11f95b9..9d8728df8f 100644 --- a/examples/cpp/aot_inductor/llama2/config.json +++ b/examples/cpp/aot_inductor/llama2/config.json @@ -1,4 +1,4 @@ { -"checkpoint_path" : "/home/ubuntu/serve/examples/cpp/babyllama/stories15M.bin", -"tokenizer_path" : "/home/ubuntu/serve/cpp/test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" +"checkpoint_path" : "/home/ubuntu/serve/cpp/_build/test/resources/examples/aot_inductor/llama_handler/stories15M.so", +"tokenizer_path" : "/home/ubuntu/serve/cpp/_build/test/resources/examples/babyllama/babyllama_handler/tokenizer.bin" } diff --git a/examples/cpp/babyllama/README.md b/examples/cpp/babyllama/README.md index cba4df5cd5..cd68eec93a 100644 --- a/examples/cpp/babyllama/README.md +++ b/examples/cpp/babyllama/README.md @@ -1,3 +1,5 @@ +## BabyLlama example + This example is adapted from https://github.com/karpathy/llama2.c. The handler C++ source code for this examples can be found [here](../../../cpp/src/examples/babyllama/). 
### Setup @@ -30,7 +32,7 @@ echo '{ While building the C++ backend the `libbabyllama_handler.so` file is generated in the [babyllama_handler](../../../cpp/test/resources/examples/babyllama/babyllama_handler) folder. ```bash -cp ../../../cpp/test/resources/examples/babyllama/babyllama_handler/libbabyllama_handler.so ./ +cp ../../../cpp/_build/test/resources/examples/babyllama/babyllama_handler/libbabyllama_handler.so ./ ``` ### Generate MAR file diff --git a/examples/cpp/llamacpp/README.md b/examples/cpp/llamacpp/README.md index 8221262858..f0ab891e52 100644 --- a/examples/cpp/llamacpp/README.md +++ b/examples/cpp/llamacpp/README.md @@ -1,3 +1,5 @@ +## Llama.cpp example + This example used [llama.cpp](https://github.com/ggerganov/llama.cpp) to deploy a Llama-2-7B-Chat model using the TorchServe C++ backend. The handler C++ source code for this examples can be found [here](../../../cpp/src/examples/llamacpp/). @@ -29,7 +31,7 @@ echo '{ While building the C++ backend the `libllamacpp_handler.so` file is generated in the [llamacpp_handler](../../../cpp/test/resources/examples/llamacpp/llamacpp_handler) folder. ```bash -cp ../../../cpp/test/resources/examples/llamacpp/llamacpp_handler/libllamacpp_handler.so ./ +cp ../../../cpp/_build/test/resources/examples/llamacpp/llamacpp_handler/libllamacpp_handler.so ./ ``` ### Generate MAR file From f5888d27b3a1817082411935d0adbbac2fe5bf7d Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Wed, 31 Jan 2024 18:01:15 +0000 Subject: [PATCH 08/11] Fix spell checks --- cpp/README.md | 2 +- ts_scripts/spellcheck_conf/wordlist.txt | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cpp/README.md b/cpp/README.md index 4f7dd53318..849bb76f68 100644 --- a/cpp/README.md +++ b/cpp/README.md @@ -58,7 +58,7 @@ Here is an [example](https://github.com/pytorch/serve/tree/cpp_backend/cpp/test/ torch-model-archiver --model-name mnist_handler --version 1.0 --serialized-file mnist_script.pt --handler libmnist_handler:MnistHandler --runtime LSP ``` Here is an [example](https://github.com/pytorch/serve/tree/cpp_backend/cpp/test/resources/examples/mnist/mnist_handler) of unzipped model mar file. -##### BabyLLama Example +##### BabyLlama Example The babyllama example can be found [here](https://github.com/pytorch/serve/blob/master/cpp/src/examples/babyllama/). 
To run the example we need to download the weights as well as tokenizer files: ```bash diff --git a/ts_scripts/spellcheck_conf/wordlist.txt b/ts_scripts/spellcheck_conf/wordlist.txt index ccc2188e9f..bbfe1903e3 100644 --- a/ts_scripts/spellcheck_conf/wordlist.txt +++ b/ts_scripts/spellcheck_conf/wordlist.txt @@ -1170,8 +1170,12 @@ bfloat bb babyllama libbabyllama -BabyLLama +BabyLlama BabyLlamaHandler CMakeLists TorchScriptHandler libllamacpp +libtorch +Andrej +Karpathy's +Maher's From 37608a2e8549071bbd457c96ca91f5c51375d802 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Wed, 31 Jan 2024 18:06:28 +0000 Subject: [PATCH 09/11] Update example description in cpp/README.md --- cpp/README.md | 55 +++++++++------------------------------------------ 1 file changed, 9 insertions(+), 46 deletions(-) diff --git a/cpp/README.md b/cpp/README.md index 849bb76f68..70b96339b9 100644 --- a/cpp/README.md +++ b/cpp/README.md @@ -42,7 +42,7 @@ By default, TorchServe cpp provides a handler for TorchScript [src/backends/hand * [Preprocess](serve/blob/cpp_backend/cpp/src/backends/handler/base_handler.hh#L40) * [Inference](serve/blob/cpp_backend/cpp/src/backends/handler/base_handler.hh#L46) * [Postprocess](serve/blob/cpp_backend/cpp/src/backends/handler/base_handler.hh#L53) -#### Example +#### Usage ##### Using TorchScriptHandler * set runtime as "LSP" in model archiver option [--runtime](https://github.com/pytorch/serve/tree/master/model-archiver#arguments) * set handler as "TorchScriptHandler" in model archiver option [--handler](https://github.com/pytorch/serve/tree/master/model-archiver#arguments) @@ -58,49 +58,12 @@ Here is an [example](https://github.com/pytorch/serve/tree/cpp_backend/cpp/test/ torch-model-archiver --model-name mnist_handler --version 1.0 --serialized-file mnist_script.pt --handler libmnist_handler:MnistHandler --runtime LSP ``` Here is an [example](https://github.com/pytorch/serve/tree/cpp_backend/cpp/test/resources/examples/mnist/mnist_handler) of unzipped model mar file. -##### BabyLlama Example -The babyllama example can be found [here](https://github.com/pytorch/serve/blob/master/cpp/src/examples/babyllama/). -To run the example we need to download the weights as well as tokenizer files: -```bash -wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin -wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.bin -``` -Subsequently, we need to adjust the paths according to our local file structure in [config.json](https://github.com/pytorch/serve/blob/master/serve/cpp/test/resources/examples/babyllama/babyllama_handler/config.json). 
-```bash -{ -"checkpoint_path" : "/home/ubuntu/serve/cpp/stories15M.bin", -"tokenizer_path" : "/home/ubuntu/serve/cpp/src/examples/babyllama/tokenizer.bin" -} -``` -Then we can create the mar file and deploy it with: -```bash -cd serve/cpp/test/resources/examples/babyllama/babyllama_handler -torch-model-archiver --model-name llm --version 1.0 --handler libbabyllama_handler:BabyLlamaHandler --runtime LSP --extra-files config.json -mkdir model_store && mv llm.mar model_store/ -torchserve --ncs --start --model-store model_store - -curl -v -X POST "http://localhost:8081/models?initial_workers=1&url=llm.mar" -``` -The handler name `libbabyllama_handler:BabyLlamaHandler` consists of our shared library name (as defined in our [CMakeLists.txt](https://github.com/pytorch/serve/blob/master/serve/cpp/src/examples/CMakeLists.txt)) as well as the class name we chose for our [custom handler class](https://github.com/pytorch/serve/blob/master/serve/cpp/src/examples/babyllama/baby_llama_handler.cc) which derives its properties from BaseHandler. -To test the model we can run: -```bash -cd serve/cpp/test/resources/examples/babyllama/ -curl http://localhost:8080/predictions/llm -T prompt.txt -``` -##### Mnist example -* Transform data on client side. For example: -``` -import torch -from PIL import Image -from torchvision import transforms - -image_processing = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.1307,), (0.3081,)) - ]) -image = Image.open("examples/image_classifier/mnist/test_data/0.png") -image = image_processing(image) -torch.save(image, "0_png.pt") -``` -* Run model registration and prediction: [Using BaseHandler](serve/cpp/test/backends/torch_scripted/torch_scripted_backend_test.cc#L54) or [Using customized handler](serve/cpp/test/backends/torch_scripted/torch_scripted_backend_test.cc#L72). +#### Examples +We have created a couple of examples that can get you started with the C++ backend. +The examples are all located under serve/examples/cpp and each comes with a detailed description of how to set it up. 
+The following examples are available: +* [AOTInductor Llama](../examples/cpp/aot_inductor/llama2/) +* [BabyLlama](../examples/cpp/babyllama/) +* [Llama.cpp](../examples/cpp/llamacpp/) +* [MNIST](../examples/cpp/mnist/) From 940695807dfd8a69e2fc6fc44532429f7989b83f Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Thu, 8 Feb 2024 19:41:13 +0000 Subject: [PATCH 10/11] Point llama2.so to temporary fork --- .gitmodules | 3 +-- cpp/third-party/llama2.so | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.gitmodules b/.gitmodules index 04b4d32603..3125a3b997 100644 --- a/.gitmodules +++ b/.gitmodules @@ -9,5 +9,4 @@ url = https://github.com/karpathy/llama2.c [submodule "cpp/third-party/llama2.so"] path = cpp/third-party/llama2.so - url = https://github.com/bertmaher/llama2.so.git - \ No newline at end of file + url = https://github.com/mreso/llama2.so.git diff --git a/cpp/third-party/llama2.so b/cpp/third-party/llama2.so index e957cf13a9..ac438a5049 160000 --- a/cpp/third-party/llama2.so +++ b/cpp/third-party/llama2.so @@ -1 +1 @@ -Subproject commit e957cf13a910658e7c01819566de0ddcd958a6f0 +Subproject commit ac438a5049b5f25f473d49c13c9b26f8f5870d54 From 5786fc2d7204b90353b033df719876913c3e5530 Mon Sep 17 00:00:00 2001 From: Matthias Reso <13337103+mreso@users.noreply.github.com> Date: Thu, 8 Feb 2024 19:46:41 +0000 Subject: [PATCH 11/11] Make aot inductor cpp example run with 2.2 instead of nightlies --- cpp/build.sh | 22 +++++++++++-------- cpp/test/examples/examples_test.cc | 4 ---- examples/cpp/aot_inductor/llama2/README.md | 4 +--- examples/cpp/aot_inductor/llama2/compile.py | 8 ++----- .../llama2/src/llama2.so/llama2.hh | 2 +- 5 files changed, 17 insertions(+), 23 deletions(-) diff --git a/cpp/build.sh b/cpp/build.sh index 89a8e79b43..6f6dbf81e9 100755 --- a/cpp/build.sh +++ b/cpp/build.sh @@ -74,19 +74,19 @@ function install_kineto() { } function install_libtorch() { - TORCH_VERSION="2.1.1" + TORCH_VERSION="2.2.0" if [ "$PLATFORM" = "Mac" ]; then if [ ! -d "$DEPS_DIR/libtorch" ]; then if [[ $(uname -m) == 'x86_64' ]]; then echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Mac x86_64 ${COLOR_OFF}" - wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-x86_64-2.2.0.zip - unzip libtorch-macos-x86_64-2.2.0.zip - rm libtorch-macos-x86_64-2.2.0.zip + wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-x86_64-${TORCH_VERSION}.zip + unzip libtorch-macos-x86_64-${TORCH_VERSION}.zip + rm libtorch-macos-x86_64-${TORCH_VERSION}.zip else echo -e "${COLOR_GREEN}[ INFO ] Install libtorch on Mac arm64 ${COLOR_OFF}" - wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-arm64-2.2.0.zip - unzip libtorch-macos-arm64-2.2.0.zip - rm libtorch-macos-arm64-2.2.0.zip + wget https://download.pytorch.org/libtorch/cpu/libtorch-macos-arm64-${TORCH_VERSION}.zip + unzip libtorch-macos-arm64-${TORCH_VERSION}.zip + rm libtorch-macos-arm64-${TORCH_VERSION}.zip fi fi elif [ "$PLATFORM" = "Windows" ]; then @@ -190,7 +190,11 @@ function build_llama_cpp() { BWD=$(pwd) LLAMA_CPP_SRC_DIR=$BASE_DIR/third-party/llama.cpp cd "${LLAMA_CPP_SRC_DIR}" - make LLAMA_METAL=OFF + if [ "$PLATFORM" = "Mac" ]; then + make LLAMA_METAL=OFF -j + else + make -j + fi cd "$BWD" || exit } @@ -204,7 +208,7 @@ function prepare_test_files() { if [ ! -f "${EX_DIR}/babyllama/babyllama_handler/stories15M.bin" ]; then wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin -O "${EX_DIR}/babyllama/babyllama_handler/stories15M.bin" fi - if [ ! 
-f "${EX_DIR}/aot_inductor/llama_handler/stories15M.so" ] && [ "$USE_NIGHTLIES" == true ]; then + if [ ! -f "${EX_DIR}/aot_inductor/llama_handler/stories15M.so" ]; then local HANDLER_DIR=${EX_DIR}/aot_inductor/llama_handler/ if [ ! -f "${HANDLER_DIR}/stories15M.pt" ]; then wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt?download=true -O "${HANDLER_DIR}/stories15M.pt" diff --git a/cpp/test/examples/examples_test.cc b/cpp/test/examples/examples_test.cc index 518cc51158..00e5135715 100644 --- a/cpp/test/examples/examples_test.cc +++ b/cpp/test/examples/examples_test.cc @@ -32,10 +32,6 @@ TEST_F(ModelPredictTest, TestLoadPredictAotInductorLlamaHandler) { std::ifstream f1(file1); std::ifstream f2(file2); - if (TORCH_VERSION_MAJOR < 2 || - (TORCH_VERSION_MAJOR >= 2 && TORCH_VERSION_MINOR < 3)) - GTEST_SKIP() << "Skipping TestLoadPredictAotInductorLlamaHandler because " - "it needs at least libtorch version >=2.3.0"; if (!f1.good() || !f2.good()) GTEST_SKIP() << "Skipping TestLoadPredictAotInductorLlamaHandler because " diff --git a/examples/cpp/aot_inductor/llama2/README.md b/examples/cpp/aot_inductor/llama2/README.md index d04d917fe2..b3e9261b84 100644 --- a/examples/cpp/aot_inductor/llama2/README.md +++ b/examples/cpp/aot_inductor/llama2/README.md @@ -4,12 +4,10 @@ The handler C++ source code for this examples can be found [here](src/). ### Setup 1. Follow the instructions in [README.md](../../../../cpp/README.md) to build the TorchServe C++ backend. -Currently, this example required the availability of Pytorch nightlies, so make sure you have that installed. -To build the cpp backend with the newest nightlies of libtorch on Linux we can add the -n flag to the build script. ``` cd serve/cpp -./builld.sh -n +./builld.sh ``` The build script will already create the necessary artifact for this example. diff --git a/examples/cpp/aot_inductor/llama2/compile.py b/examples/cpp/aot_inductor/llama2/compile.py index aff0804b3f..0906e4942f 100644 --- a/examples/cpp/aot_inductor/llama2/compile.py +++ b/examples/cpp/aot_inductor/llama2/compile.py @@ -29,15 +29,11 @@ def load_checkpoint(checkpoint): args = parser.parse_args() model, config = load_checkpoint(args.checkpoint) x = torch.randint(0, config.vocab_size, (1, config.max_seq_len // 2)) - constraints = [ - torch._export.dynamic_dim(x, 1), - torch._export.dynamic_dim(x, 1) <= config.max_seq_len, - torch._export.dynamic_dim(x, 1) >= 1, - ] + seq_len_dim = torch.export.Dim("seq_len", min=1, max=config.max_seq_len) torch._C._GLIBCXX_USE_CXX11_ABI = True so_path = torch._export.aot_compile( model, (x,), - constraints=constraints, + dynamic_shapes={"tokens": (None, seq_len_dim)}, options={"aot_inductor.output_path": args.filepath}, ) diff --git a/examples/cpp/aot_inductor/llama2/src/llama2.so/llama2.hh b/examples/cpp/aot_inductor/llama2/src/llama2.so/llama2.hh index e0344462c7..ef8bbe8f99 100644 --- a/examples/cpp/aot_inductor/llama2/src/llama2.so/llama2.hh +++ b/examples/cpp/aot_inductor/llama2/src/llama2.so/llama2.hh @@ -7,7 +7,7 @@ #include #include #include -#include +#include // ---------------------------------------------------------------------------- // Transformer model