From 08e7fccccde103c41ac4c1a2c41d398463d62cd6 Mon Sep 17 00:00:00 2001 From: Michael Gschwind Date: Sun, 12 May 2024 10:03:31 -0700 Subject: [PATCH 1/2] minimal example for enabling Andrej's runner, from commit 2d477022986843bbe23b60ea0529cd5d2718377b --- .../unsupported/runner-aoti/CMakeLists.txt | 11 + parking_lot/unsupported/runner-aoti/LICENSE | 22 + parking_lot/unsupported/runner-aoti/run.cpp | 6 + .../unsupported/runner-et/CMakeLists.txt | 32 + parking_lot/unsupported/runner-et/LICENSE | 22 + parking_lot/unsupported/runner-et/run.cpp | 6 + parking_lot/unsupported/runner/run.cpp | 805 ++++++++++++++++++ 7 files changed, 904 insertions(+) create mode 100644 parking_lot/unsupported/runner-aoti/CMakeLists.txt create mode 100644 parking_lot/unsupported/runner-aoti/LICENSE create mode 100644 parking_lot/unsupported/runner-aoti/run.cpp create mode 100644 parking_lot/unsupported/runner-et/CMakeLists.txt create mode 100644 parking_lot/unsupported/runner-et/LICENSE create mode 100644 parking_lot/unsupported/runner-et/run.cpp create mode 100644 parking_lot/unsupported/runner/run.cpp diff --git a/parking_lot/unsupported/runner-aoti/CMakeLists.txt b/parking_lot/unsupported/runner-aoti/CMakeLists.txt new file mode 100644 index 000000000..dd82a5e04 --- /dev/null +++ b/parking_lot/unsupported/runner-aoti/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.18 FATAL_ERROR) +project(llama2so LANGUAGES CXX) + +find_package(CUDA) + +find_package(Torch REQUIRED) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g ${TORCH_CXX_FLAGS} -fpermissive") + +add_executable(run run.cpp) +target_link_libraries(run "${TORCH_LIBRARIES}" m) +set_property(TARGET run PROPERTY CXX_STANDARD 17) diff --git a/parking_lot/unsupported/runner-aoti/LICENSE b/parking_lot/unsupported/runner-aoti/LICENSE new file mode 100644 index 000000000..821bd09ce --- /dev/null +++ b/parking_lot/unsupported/runner-aoti/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2023 Andrej Karpathy +Copyright (c) 2024 Meta Platforms + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/parking_lot/unsupported/runner-aoti/run.cpp b/parking_lot/unsupported/runner-aoti/run.cpp
new file mode 100644
index 000000000..56d623a06
--- /dev/null
+++ b/parking_lot/unsupported/runner-aoti/run.cpp
@@ -0,0 +1,6 @@
+/* Inference for Llama-2 Transformer model in pure C */
+/* this uses the same logic regardless of AOTI or ET */
+/* but requires different data types - ATen vs ETen */
+
+#define __AOTI_MODEL__
+#include "../runner/run.cpp"
diff --git a/parking_lot/unsupported/runner-et/CMakeLists.txt b/parking_lot/unsupported/runner-et/CMakeLists.txt
new file mode 100644
index 000000000..d94e348aa
--- /dev/null
+++ b/parking_lot/unsupported/runner-et/CMakeLists.txt
@@ -0,0 +1,32 @@
+cmake_minimum_required(VERSION 3.24)
+set(CMAKE_CXX_STANDARD 17)
+
+project(llama-fast)
+
+include(CMakePrintHelpers)
+set(LLAMA_FAST_ROOT $ENV{LLAMA_FAST_ROOT})
+cmake_print_variables(LLAMA_FAST_ROOT)
+
+find_package(executorch CONFIG REQUIRED PATHS ${LLAMA_FAST_ROOT}/build/install/lib/cmake/ExecuTorch)
+set(_common_include_directories ${LLAMA_FAST_ROOT}/build/src)
+cmake_print_variables(_common_include_directories)
+
+target_include_directories(executorch INTERFACE ${_common_include_directories}) # Ideally ET installation process would do this
+add_executable(runner_et run.cpp)
+
+# Link ET runtime + extensions
+target_link_libraries(
+    runner_et PRIVATE
+    executorch
+    extension_module
+    ${LLAMA_FAST_ROOT}/build/src/executorch/cmake-out/extension/data_loader/libextension_data_loader.a # This one does not get installed by ET
+    optimized_kernels
+    portable_kernels
+    cpublas
+    eigen_blas
+)
+target_link_libraries(runner_et PRIVATE "$)
+target_link_libraries(runner_et PRIVATE "$)
+target_link_libraries(runner_et PRIVATE "$)
+target_link_libraries(runner_et PRIVATE "$)
+target_link_libraries(runner_et PRIVATE "$)
diff --git a/parking_lot/unsupported/runner-et/LICENSE b/parking_lot/unsupported/runner-et/LICENSE
new file mode 100644
index 000000000..821bd09ce
--- /dev/null
+++ b/parking_lot/unsupported/runner-et/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2023 Andrej Karpathy
+Copyright (c) 2024 Meta Platforms
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/parking_lot/unsupported/runner-et/run.cpp b/parking_lot/unsupported/runner-et/run.cpp
new file mode 100644
index 000000000..64d38fa0a
--- /dev/null
+++ b/parking_lot/unsupported/runner-et/run.cpp
@@ -0,0 +1,6 @@
+/* Inference for Llama-2 Transformer model in pure C */
+/* this uses the same logic regardless of AOTI or ET */
+/* but requires different data types - ATen vs ETen */
+
+#define __ET_MODEL__
+#include "../runner/run.cpp"
diff --git a/parking_lot/unsupported/runner/run.cpp b/parking_lot/unsupported/runner/run.cpp
new file mode 100644
index 000000000..27c16cc95
--- /dev/null
+++ b/parking_lot/unsupported/runner/run.cpp
@@ -0,0 +1,805 @@
+/* Inference for Llama-2 Transformer model in pure C */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <stdint.h>
+#include <time.h>
+#include <math.h>
+#include <string.h>
+
+#ifdef DEBUG
+#include <cassert>
+#include <iostream>
+#endif
+
+#if defined(__AOTI_MODEL__) || (defined (__ET_MODEL__) && defined(USE_ATENLIB))
+#include <torch/torch.h>
+#endif
+
+#ifdef __AOTI_MODEL__
+#include <torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h>
+#else // __ET_MODEL__
+#include <executorch/extension/data_loader/file_data_loader.h>
+#include <executorch/extension/module/module.h>
+#include <executorch/extension/runner_util/managed_tensor.h>
+#include <executorch/runtime/core/evalue.h>
+#include <executorch/runtime/core/exec_aten/exec_aten.h>
+#include <executorch/runtime/core/result.h>
+
+using torch::executor::Module;
+using torch::executor::ManagedTensor;
+using torch::executor::EValue;
+using exec_aten::ScalarType;
+using torch::executor::Result;
+#endif
+
+
+// ----------------------------------------------------------------------------
+// Transformer model
+
+typedef struct {
+    int vocab_size; // vocabulary size, usually 256 (byte-level)
+    int seq_len; // max sequence length
+} Config;
+
+typedef struct {
+    float *logits; // output logits
+    int64_t* toks; // tokens seen so far; no kv-cache :(
+} RunState;
+
+typedef struct {
+    Config config; // the hyperparameters of the architecture (the blueprint)
+    RunState state; // buffers for the "wave" of activations in the forward pass
+
+#ifdef __AOTI_MODEL__
+    torch::inductor::AOTIModelContainerRunnerCpu *runner;
+#else // __ET_MODEL__
+    Module* runner;
+#endif
+
+} Transformer;
+
+void malloc_run_state(RunState* s, Config* p) {
+    // we calloc instead of malloc to keep valgrind happy
+    s->logits = (float *) calloc(p->vocab_size, sizeof(float));
+    s->toks = (int64_t *) calloc(p->seq_len, sizeof(int64_t));
+    if (!s->logits || !s->toks) {
+        fprintf(stderr, "malloc failed!\n");
+        exit(EXIT_FAILURE);
+    }
+}
+
+void free_run_state(RunState* s) {
+    free(s->logits);
+    free(s->toks);
+}
+
+void read_checkpoint(char* checkpoint, Config* config) {
+    FILE *file = fopen(checkpoint, "rb");
+    if (!file) { fprintf(stderr, "Couldn't open file %s\n", checkpoint); exit(EXIT_FAILURE); }
+    // read in the config header
+    if (fread(config, sizeof(Config), 1, file) != 1) { exit(EXIT_FAILURE); }
+    // negative vocab size is hacky way of signaling unshared weights. bit yikes.
+    int shared_weights = config->vocab_size > 0 ?
1 : 0;
+    config->vocab_size = abs(config->vocab_size);
+    (void)shared_weights; // weights themselves are loaded by the AOTI/ET runtime, not here
+    fclose(file);
+}
+
+void build_transformer(Transformer *t, char* checkpoint_path, int vocab_size, int seq_len) {
+    // read in the Config and the Weights from the checkpoint
+    //read_checkpoint(checkpoint_path, &t->config);
+    // allocate the RunState buffers
+    t->config.vocab_size = vocab_size;
+    t->config.seq_len = seq_len;
+    malloc_run_state(&t->state, &t->config);
+
+#ifdef __AOTI_MODEL__
+    t->runner = new torch::inductor::AOTIModelContainerRunnerCpu(
+        /* path to model DSO */ checkpoint_path,
+        /* thread pool size  */ 1
+    );
+#else //__ET_MODEL__
+    t->runner = new Module(
+        /* path to PTE model */ checkpoint_path,
+        /* PTE mmap settings */ Module::MlockConfig::UseMlockIgnoreErrors
+    );
+#endif
+
+}
+
+void free_transformer(Transformer* t) {
+    // free the RunState buffers
+    free_run_state(&t->state);
+    delete t->runner;
+}
+
+// ----------------------------------------------------------------------------
+// neural net blocks; the dynamics of the Transformer
+
+void softmax(float* x, int size) {
+    // find max value (for numerical stability)
+    float max_val = x[0];
+    for (int i = 1; i < size; i++) {
+        if (x[i] > max_val) {
+            max_val = x[i];
+        }
+    }
+    // exp and sum
+    float sum = 0.0f;
+    for (int i = 0; i < size; i++) {
+        x[i] = expf(x[i] - max_val);
+        sum += x[i];
+    }
+    // normalize
+    for (int i = 0; i < size; i++) {
+        x[i] /= sum;
+    }
+}
+
+float* forward(Transformer* transformer, int token, int pos) {
+    Config* p = &transformer->config;
+    RunState* s = &transformer->state;
+    s->toks[pos] = token;
+    long token_buffer[1] = {token};
+    long pos_buffer[1] = {pos};
+
+#ifdef DEBUG
+    std::cerr << "token: " << token << " pos: " << pos << "\n";
+#endif
+
+#ifdef __AOTI_MODEL__
+    torch::Tensor token_tensor = torch::from_blob(token_buffer, {1, 1}, torch::kLong);
+    torch::Tensor pos_tensor = torch::from_blob(pos_buffer, {1}, torch::kLong);
+    std::vector<torch::Tensor> inputs{token_tensor, pos_tensor};
+
+    torch::Tensor result = transformer->runner->run(inputs)[0];
+    auto logits = result[0].data_ptr<float>();
+
+#else // __ET_MODEL__
+    ManagedTensor pos_managed(
+        pos_buffer, sizeof(int64_t), { 1 }, ScalarType::Long);
+#ifndef __KV_CACHE__
+    // @lint-ignore CLANGTIDY facebook-hte-LocalUncheckedArrayBounds
+    ManagedTensor tokens_managed(&(s->toks[pos]), /*ignored*/sizeof(int64_t)*(pos+1), {1, 1}, ScalarType::Long);
+#else // __KV_CACHE__
+    ManagedTensor tokens_managed(
+        token_buffer, sizeof(int64_t), {1, 1}, ScalarType::Long);
+#endif
+    std::vector<EValue> inputs;
+    auto tmp1 = EValue(tokens_managed.get_aliasing_tensor());
+    auto tmp2 = EValue(pos_managed.get_aliasing_tensor());
+
+    inputs.push_back(tmp1);
+    inputs.push_back(tmp2);
+    Result<std::vector<EValue>> outputs_res = transformer->runner->forward(inputs);
+    if (!outputs_res.ok()) {
+        fprintf(stderr, "ExecuTorch forward() failed.\n");
+        exit(EXIT_FAILURE);
+    }
+    std::vector<EValue> result = outputs_res.get();
+    auto logits = result[0].toTensor().const_data_ptr<float>();
+#endif
+
+    memcpy(s->logits, logits, p->vocab_size * sizeof(float));
+    return s->logits;
+}
+
+// ----------------------------------------------------------------------------
+// The Byte Pair Encoding (BPE) Tokenizer that translates strings <-> tokens
+
+typedef struct {
+    const char *str;
+    int id;
+} TokenIndex;
+
+typedef struct {
+    char** vocab;
+    float* vocab_scores;
+    TokenIndex *sorted_vocab;
+    int vocab_size;
+    unsigned int max_token_length;
+    unsigned char byte_pieces[512]; // stores all single-byte strings
+} Tokenizer;
+
+int compare_tokens(const void *a, const void *b) {
+    return
strcmp(((TokenIndex*)a)->str, ((TokenIndex*)b)->str); +} + +void build_tokenizer(Tokenizer* t, const char* tokenizer_path, int vocab_size) { + // i should have written the vocab_size into the tokenizer file... sigh + t->vocab_size = vocab_size; + // malloc space to hold the scores and the strings + t->vocab = (char**)malloc(vocab_size * sizeof(char*)); + t->vocab_scores = (float*)malloc(vocab_size * sizeof(float)); + t->sorted_vocab = NULL; // initialized lazily + for (int i = 0; i < 256; i++) { + t->byte_pieces[i * 2] = (unsigned char)i; + t->byte_pieces[i * 2 + 1] = '\0'; + } + // read in the file + FILE *file = fopen(tokenizer_path, "rb"); + if (!file) { fprintf(stderr, "couldn't load %s\n", tokenizer_path); exit(EXIT_FAILURE); } + if (fread(&t->max_token_length, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); } + int len; + for (int i = 0; i < vocab_size; i++) { + if (fread(t->vocab_scores + i, sizeof(float), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE);} + if (fread(&len, sizeof(int), 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); } + t->vocab[i] = (char *)malloc(len + 1); + if (fread(t->vocab[i], len, 1, file) != 1) { fprintf(stderr, "failed read\n"); exit(EXIT_FAILURE); } + t->vocab[i][len] = '\0'; // add the string terminating token + } + fclose(file); +} + +void free_tokenizer(Tokenizer* t) { + for (int i = 0; i < t->vocab_size; i++) { free(t->vocab[i]); } + free(t->vocab); + free(t->vocab_scores); + free(t->sorted_vocab); +} + +char* decode(Tokenizer* t, int prev_token, int token) { + char *piece = t->vocab[token]; + // following BOS (1) token, sentencepiece decoder strips any leading whitespace (see PR #89) + if (prev_token == 1 && piece[0] == ' ') { piece++; } + // careful, some tokens designate raw bytes, and look like e.g. '<0x01>' + // parse this and convert and return the actual byte + unsigned char byte_val; + if (sscanf(piece, "<0x%02hhX>", &byte_val) == 1) { + piece = (char*)t->byte_pieces + byte_val * 2; + } + return piece; +} + +void safe_printf(char *piece) { + // piece might be a raw byte token, and we only want to print printable chars or whitespace + // because some of the other bytes can be various control codes, backspace, etc. + if (piece == NULL) { return; } + if (piece[0] == '\0') { return; } + if (piece[1] == '\0') { + unsigned char byte_val = piece[0]; + if (!(isprint(byte_val) || isspace(byte_val))) { + return; // bad byte, don't print it + } + } + printf("%s", piece); +} + +int str_lookup(const char *str, TokenIndex *sorted_vocab, int vocab_size) { + // efficiently find the perfect match for str in vocab, return its index or -1 if not found + TokenIndex tok = { .str = str }; // acts as the key to search for + TokenIndex *res = (TokenIndex *) bsearch(&tok, sorted_vocab, vocab_size, sizeof(TokenIndex), compare_tokens); + return res != NULL ? 
res->id : -1;
+}
+
+void encode(Tokenizer* t, const char *text, int8_t bos, int8_t eos, int *tokens, int *n_tokens) {
+    // encode the string text (input) into an upper-bound preallocated tokens[] array
+    // bos != 0 means prepend the BOS token (=1), eos != 0 means append the EOS token (=2)
+    if (text == NULL) { fprintf(stderr, "cannot encode NULL text\n"); exit(EXIT_FAILURE); }
+
+    if (t->sorted_vocab == NULL) {
+        // lazily malloc and sort the vocabulary
+        t->sorted_vocab = (TokenIndex *)malloc(t->vocab_size * sizeof(TokenIndex));
+        for (int i = 0; i < t->vocab_size; i++) {
+            t->sorted_vocab[i].str = t->vocab[i];
+            t->sorted_vocab[i].id = i;
+        }
+        qsort(t->sorted_vocab, t->vocab_size, sizeof(TokenIndex), compare_tokens);
+    }
+
+    // create a temporary buffer that will store merge candidates of always two consecutive tokens
+    // *2 for concat, +1 for null terminator +2 for UTF8 (in case max_token_length is 1)
+    const int str_buffer_len = t->max_token_length*2 + 1 + 2;
+    char* str_buffer = (char *)malloc(str_buffer_len * sizeof(char));
+    size_t str_len = 0;
+
+    // start at 0 tokens
+    *n_tokens = 0;
+
+    // add optional BOS (=1) token, if desired
+    if (bos) tokens[(*n_tokens)++] = 1;
+
+    // add_dummy_prefix is true by default
+    // so prepend a dummy prefix token to the input string, but only if text != ""
+    // TODO: pretty sure this isn't correct in the general case but I don't have the
+    // energy to read more of the sentencepiece code to figure out what it's doing
+    if (text[0] != '\0') {
+        int dummy_prefix = str_lookup(" ", t->sorted_vocab, t->vocab_size);
+        tokens[(*n_tokens)++] = dummy_prefix;
+    }
+
+    // Okay UTF-8 time. This will get messy. Here is the reference from Wikipedia:
+    // Code point ↔ UTF-8 conversion
+    // First code point    Last code point    Byte 1    Byte 2    Byte 3    Byte 4
+    // U+0000    U+007F        0xxxxxxx
+    // U+0080    U+07FF        110xxxxx    10xxxxxx
+    // U+0800    U+FFFF        1110xxxx    10xxxxxx    10xxxxxx
+    // U+10000    U+10FFFF    11110xxx    10xxxxxx    10xxxxxx    10xxxxxx
+
+    // process the raw (UTF-8) byte sequence of the input string
+    for (const char *c = text; *c != '\0'; c++) {
+
+        // reset buffer if the current byte is ASCII or a leading byte
+        // 0xC0 is 11000000, so (*c & 0xC0) keeps the first 2 bits and zeros the rest
+        // 0x80 is 10000000
+        // in UTF-8, all continuation bytes start with "10" in first two bits
+        // so in English this is: "if this byte is not a continuation byte"
+        if ((*c & 0xC0) != 0x80) {
+            // this byte must be either a leading byte (11...) or an ASCII char (0x...)
+            // => reset our location, as we're starting a new UTF-8 codepoint
+            str_len = 0;
+        }
+
+        // append the current byte to the buffer
+        str_buffer[str_len++] = *c; // ++ is post-increment, incremented after this line
+        str_buffer[str_len] = '\0';
+
+        // while the next character is a continuation byte, continue appending
+        // but if there are too many of them, just stop to avoid overrunning str_buffer size.
+        if ((*(c+1) & 0xC0) == 0x80 && str_len < 4) {
+            continue;
+        }
+
+        // ok c+1 is not a continuation byte, so we've read in a full codepoint
+        int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
+
+        if (id != -1) {
+            // we found this codepoint in vocab, add it as a token
+            tokens[(*n_tokens)++] = id;
+        } else {
+            // byte_fallback encoding: just encode each byte as a token
+            // +3 is here because the first 3 vocab elements are <unk>, <s>, </s>
+            // so the individual bytes only start at index 3
+            for (int i=0; i < str_len; i++) {
+                tokens[(*n_tokens)++] = (unsigned char)str_buffer[i] + 3;
+            }
+        }
+        str_len = 0; // protect against a sequence of stray UTF8 continuation bytes
+    }
+
+    // merge the best consecutive pair each iteration, according to the scores in vocab_scores
+    while (1) {
+        float best_score = -1e10;
+        int best_id = -1;
+        int best_idx = -1;
+
+        for (int i=0; i < (*n_tokens-1); i++) {
+            // check if we can merge the pair (tokens[i], tokens[i+1])
+            snprintf(str_buffer, str_buffer_len, "%s%s", t->vocab[tokens[i]], t->vocab[tokens[i+1]]);
+            int id = str_lookup(str_buffer, t->sorted_vocab, t->vocab_size);
+            if (id != -1 && t->vocab_scores[id] > best_score) {
+                // this merge pair exists in vocab! record its score and position
+                best_score = t->vocab_scores[id];
+                best_id = id;
+                best_idx = i;
+            }
+        }
+
+        if (best_idx == -1) {
+            break; // we couldn't find any more pairs to merge, so we're done
+        }
+
+        // merge the consecutive pair (best_idx, best_idx+1) into new token best_id
+        tokens[best_idx] = best_id;
+        // delete token at position best_idx+1, shift the entire sequence back 1
+        for (int i = best_idx+1; i < (*n_tokens-1); i++) {
+            tokens[i] = tokens[i+1];
+        }
+        (*n_tokens)--; // token length decreased
+    }
+
+    // add optional EOS (=2) token, if desired
+    if (eos) tokens[(*n_tokens)++] = 2;
+
+    free(str_buffer);
+}
+
+// ----------------------------------------------------------------------------
+// The Sampler, which takes logits and returns a sampled token
+// sampling can be done in a few ways: greedy argmax, sampling, top-p sampling
+
+typedef struct {
+    float prob;
+    int index;
+} ProbIndex; // struct used when sorting probabilities during top-p sampling
+
+typedef struct {
+    int vocab_size;
+    ProbIndex* probindex; // buffer used in top-p sampling
+    float temperature;
+    float topp;
+    unsigned long long rng_state;
+} Sampler;
+
+int sample_argmax(float* probabilities, int n) {
+    // return the index that has the highest probability
+    int max_i = 0;
+    float max_p = probabilities[0];
+    for (int i = 1; i < n; i++) {
+        if (probabilities[i] > max_p) {
+            max_i = i;
+            max_p = probabilities[i];
+        }
+    }
+    return max_i;
+}
+
+int sample_mult(float* probabilities, int n, float coin) {
+    // sample index from probabilities (they must sum to 1!)
+    // coin is a random number in [0, 1), usually from random_f32()
+    float cdf = 0.0f;
+    for (int i = 0; i < n; i++) {
+        cdf += probabilities[i];
+        if (coin < cdf) {
+            return i;
+        }
+    }
+    return n - 1; // in case of rounding errors
+}
+
+int compare(const void* a, const void* b) {
+    ProbIndex* a_ = (ProbIndex*) a;
+    ProbIndex* b_ = (ProbIndex*) b;
+    if (a_->prob > b_->prob) return -1;
+    if (a_->prob < b_->prob) return 1;
+    return 0;
+}
+
+int sample_topp(float* probabilities, int n, float topp, ProbIndex* probindex, float coin) {
+    // top-p sampling (or "nucleus sampling") samples from the smallest set of
+    // tokens that exceed probability topp. This way we never sample tokens that
+    // have very low probabilities and are less likely to go "off the rails".
+    // coin is a random number in [0, 1), usually from random_f32()
+
+    int n0 = 0;
+    // quicksort indices in descending order of probabilities
+    // values smaller than (1 - topp) / (n - 1) cannot be part of the result
+    // so for efficiency we crop these out as candidates before sorting
+    const float cutoff = (1.0f - topp) / (n - 1);
+    for (int i = 0; i < n; i++) {
+        if (probabilities[i] >= cutoff) {
+            probindex[n0].index = i;
+            probindex[n0].prob = probabilities[i];
+            n0++;
+        }
+    }
+    qsort(probindex, n0, sizeof(ProbIndex), compare);
+
+    // truncate the list where cumulative probability exceeds topp
+    float cumulative_prob = 0.0f;
+    int last_idx = n0 - 1; // in case of rounding errors consider all elements
+    for (int i = 0; i < n0; i++) {
+        cumulative_prob += probindex[i].prob;
+        if (cumulative_prob > topp) {
+            last_idx = i;
+            break; // we've exceeded topp by including last_idx
+        }
+    }
+
+    // sample from the truncated list
+    float r = coin * cumulative_prob;
+    float cdf = 0.0f;
+    for (int i = 0; i <= last_idx; i++) {
+        cdf += probindex[i].prob;
+        if (r < cdf) {
+            return probindex[i].index;
+        }
+    }
+    return probindex[last_idx].index; // in case of rounding errors
+}
+
+void build_sampler(Sampler* sampler, int vocab_size, float temperature, float topp, unsigned long long rng_seed) {
+    sampler->vocab_size = vocab_size;
+    sampler->temperature = temperature;
+    sampler->topp = topp;
+    sampler->rng_state = rng_seed;
+    // buffer only used with nucleus sampling; may not need but it's ~small
+    sampler->probindex = (ProbIndex *) malloc(sampler->vocab_size * sizeof(ProbIndex));
+}
+
+void free_sampler(Sampler* sampler) {
+    free(sampler->probindex);
+}
+
+unsigned int random_u32(unsigned long long *state) {
+    // xorshift rng: https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
+    *state ^= *state >> 12;
+    *state ^= *state << 25;
+    *state ^= *state >> 27;
+    return (*state * 0x2545F4914F6CDD1Dull) >> 32;
+}
+float random_f32(unsigned long long *state) { // random float32 in [0,1)
+    return (random_u32(state) >> 8) / 16777216.0f;
+}
+
+int sample(Sampler* sampler, float* logits) {
+    // sample the token given the logits and some hyperparameters
+    int next;
+    if (sampler->temperature == 0.0f) {
+        // greedy argmax sampling: take the token with the highest probability
+        next = sample_argmax(logits, sampler->vocab_size);
+    } else {
+        // apply the temperature to the logits
+        for (int q = 0; q < sampler->vocab_size; q++) { logits[q] /= sampler->temperature; }
+        // apply softmax to the logits to get the probabilities for next token
+        softmax(logits, sampler->vocab_size);
+        // flip a (float) coin (this is our source of entropy for sampling)
+        float coin = random_f32(&sampler->rng_state);
+        // we sample from this distribution to get the next token
+        if (sampler->topp <= 0 || sampler->topp >= 1) {
+            // simply sample from the predicted probability distribution
+            next = sample_mult(logits, sampler->vocab_size, coin);
+        } else {
+            // top-p (nucleus) sampling, clamping the least likely tokens to zero
+            next = sample_topp(logits, sampler->vocab_size, sampler->topp, sampler->probindex, coin);
+        }
+    }
+    return next;
+}
+
+// ----------------------------------------------------------------------------
+// utilities: time
+
+long time_in_ms() {
+    // return time in milliseconds, for benchmarking the model speed
+    struct timespec time;
+    clock_gettime(CLOCK_REALTIME, &time);
+    return time.tv_sec * 1000 + time.tv_nsec / 1000000;
+}
+
+// ----------------------------------------------------------------------------
+// generation loop
+
+void
generate(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler, const char *prompt, int steps) { + const char *default_prompt = "Once upon a time"; + if (prompt == NULL) { prompt = default_prompt; } + + // encode the (string) prompt into tokens sequence + int num_prompt_tokens = 0; + int* prompt_tokens = (int*)malloc((strlen(prompt)+3) * sizeof(int)); // +3 for '\0', ?BOS, ?EOS + encode(tokenizer, prompt, 1, 0, prompt_tokens, &num_prompt_tokens); + if (num_prompt_tokens < 1) { + fprintf(stderr, "something is wrong, expected at least 1 prompt token\n"); + exit(EXIT_FAILURE); + } + + #ifdef DEBUG + std::cerr << "# " << num_prompt_tokens << "\n"; + for(int i = 0; i < num_prompt_tokens; i++) + std::cerr << "[" << i << "] " << prompt_tokens[i]; + std::cerr << "\n"; + #endif + + // start the main loop + long start = 0; // used to time our code, only initialized after first iteration + int next; // will store the next token in the sequence + int token = prompt_tokens[0]; // kick off with the first token in the prompt + int pos = 0; // position in the sequence + while (pos < steps) { + + // forward the transformer to get logits for the next token + float* logits = forward(transformer, token, pos); + + // advance the state machine + if (pos < num_prompt_tokens - 1) { + // if we are still processing the input prompt, force the next prompt token + next = prompt_tokens[pos + 1]; + } else { + // otherwise sample the next token from the logits + next = sample(sampler, logits); + } + pos++; + + // data-dependent terminating condition: the BOS (=1) token delimits sequences + if (next == 1) { break; } + + // print the token as string, decode it with the Tokenizer object + char* piece = decode(tokenizer, token, next); + safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes + fflush(stdout); + token = next; + + // init the timer here because the first iteration can be slower + if (start == 0) { start = time_in_ms(); } + } + printf("\n"); + + // report achieved tok/s (pos-1 because the timer starts after first iteration) + if (pos > 1) { + long end = time_in_ms(); + fprintf(stderr, "achieved tok/s: %f\n", (pos-1) / (double)(end-start)*1000); + } + + free(prompt_tokens); +} + +void read_stdin(const char* guide, char* buffer, size_t bufsize) { + // read a line from stdin, up to but not including \n + printf("%s", guide); + if (fgets(buffer, bufsize, stdin) != NULL) { + size_t len = strlen(buffer); + if (len > 0 && buffer[len - 1] == '\n') { + buffer[len - 1] = '\0'; // strip newline + } + } +} + +// ---------------------------------------------------------------------------- +// chat loop +// I manually inspected the tokens for a few chat conversations compared to +// python reference and that seemed ok, but this was not thoroughly tested and +// is not safely implemented, it's more a proof of concept atm. 
+
+void chat(Transformer *transformer, Tokenizer *tokenizer, Sampler *sampler,
+          const char *cli_user_prompt, const char *cli_system_prompt, int steps) {
+
+    // buffers for reading the system prompt and user prompt from stdin
+    // you'll notice they are somewhat haphazardly and unsafely set atm
+    char system_prompt[512];
+    char user_prompt[512];
+    char rendered_prompt[1152];
+    int num_prompt_tokens = 0;
+    int* prompt_tokens = (int*)malloc(1152 * sizeof(int));
+    int user_idx;
+
+    // start the main loop
+    int8_t user_turn = 1; // user starts
+    int next;             // will store the next token in the sequence
+    int token;            // stores the current token to feed into the transformer
+    int prev_token;
+    int pos = 0;          // position in the sequence
+    while (pos < steps) {
+
+        // when it is the user's turn to contribute tokens to the dialog...
+        if (user_turn) {
+            // get the (optional) system prompt at position 0
+            if (pos == 0) {
+                // at position 0, the user can also contribute a system prompt
+                if (cli_system_prompt == NULL) {
+                    // system prompt was not passed in, attempt to get it from stdin
+                    read_stdin("Enter system prompt (optional): ", system_prompt, sizeof(system_prompt));
+                } else {
+                    // system prompt was passed in, use it
+                    strcpy(system_prompt, cli_system_prompt);
+                }
+            }
+            // get the user prompt
+            if (pos == 0 && cli_user_prompt != NULL) {
+                // user prompt for position 0 was passed in, use it
+                strcpy(user_prompt, cli_user_prompt);
+            } else {
+                // otherwise get user prompt from stdin
+                read_stdin("User: ", user_prompt, sizeof(user_prompt));
+            }
+            // render user/system prompts into the Llama 2 Chat schema
+            if (pos == 0 && system_prompt[0] != '\0') {
+                char system_template[] = "[INST] <<SYS>>\n%s\n<</SYS>>\n\n%s [/INST]";
+                snprintf(rendered_prompt, 1151, system_template, system_prompt, user_prompt);
+            } else {
+                char user_template[] = "[INST] %s [/INST]";
+                snprintf(rendered_prompt, 1151, user_template, user_prompt);
+            }
+            // encode the rendered prompt into tokens
+            encode(tokenizer, rendered_prompt, 1, 0, prompt_tokens, &num_prompt_tokens);
+            user_idx = 0; // reset the user index
+            user_turn = 0;
+            printf("Assistant: ");
+        }
+
+        // determine the token to pass into the transformer next
+        if (user_idx < num_prompt_tokens) {
+            // if we are still processing the input prompt, force the next prompt token
+            token = prompt_tokens[user_idx++];
+        } else {
+            // otherwise use the next token sampled from previous turn
+            token = next;
+        }
+        // EOS (=2) token ends the Assistant turn
+        if (token == 2) { user_turn = 1; }
+
+        // forward the transformer to get logits for the next token
+        float* logits = forward(transformer, token, pos);
+        next = sample(sampler, logits);
+        pos++;
+
+        if (user_idx >= num_prompt_tokens && next != 2) {
+            // the Assistant is responding, so print its output
+            char* piece = decode(tokenizer, token, next);
+            safe_printf(piece); // same as printf("%s", piece), but skips "unsafe" bytes
+            fflush(stdout);
+        }
+        if (next == 2) { printf("\n"); }
+    }
+    printf("\n");
+    free(prompt_tokens);
+}
+
+
+// ----------------------------------------------------------------------------
+// CLI, include only if not testing
+#ifndef TESTING
+
+void error_usage() {
+    fprintf(stderr, "Usage:   run <checkpoint> [options]\n");
+    fprintf(stderr, "Example: run model.bin -n 256 -i \"Once upon a time\"\n");
+    fprintf(stderr, "Options:\n");
+    fprintf(stderr, "  -t <float>  temperature in [0,inf], default 1.0\n");
+    fprintf(stderr, "  -p <float>  p value in top-p (nucleus) sampling in [0,1] default 0.9\n");
+    fprintf(stderr, "  -s <int>    random seed, default time(NULL)\n");
+    fprintf(stderr, "  -n <int>    number of steps to run for, default 256. 0 = max_seq_len\n");
+    fprintf(stderr, "  -v <int>    vocab size, default 32000\n");
+    fprintf(stderr, "  -i <string> input prompt\n");
+    fprintf(stderr, "  -z <string> optional path to custom tokenizer\n");
+    fprintf(stderr, "  -m <string> mode: generate|chat, default: generate\n");
+    fprintf(stderr, "  -y <string> (optional) system prompt in chat mode\n");
+    exit(EXIT_FAILURE);
+}
+
+int main(int argc, char *argv[]) {
+
+    // default parameters
+    char *checkpoint_path = NULL;  // e.g. out/model.bin
+    const char *tokenizer_path = "tokenizer.bin";
+    float temperature = 1.0f;   // 0.0 = greedy deterministic. 1.0 = original. don't set higher
+    float topp = 0.9f;          // top-p in nucleus sampling. 1.0 = off. 0.9 works well, but slower
+    int vocab_size = 32000;
+    int steps = 256;            // number of steps to run for
+    const char *prompt = NULL;  // prompt string
+    unsigned long long rng_seed = 0; // seed rng with time by default
+    const char *mode = "generate";   // generate|chat
+    char *system_prompt = NULL; // the (optional) system prompt to use in chat mode
+
+    // poor man's C argparse so we can override the defaults above from the command line
+    if (argc >= 2) { checkpoint_path = argv[1]; } else { error_usage(); }
+    for (int i = 2; i < argc; i+=2) {
+        // do some basic validation
+        if (i + 1 >= argc) { error_usage(); } // must have arg after flag
+        if (argv[i][0] != '-') { error_usage(); } // must start with dash
+        if (strlen(argv[i]) != 2) { error_usage(); } // must be -x (one dash, one letter)
+        // read in the args
+        if (argv[i][1] == 't') { temperature = atof(argv[i + 1]); }
+        else if (argv[i][1] == 'p') { topp = atof(argv[i + 1]); }
+        else if (argv[i][1] == 's') { rng_seed = atoi(argv[i + 1]); }
+        else if (argv[i][1] == 'n') { steps = atoi(argv[i + 1]); }
+        else if (argv[i][1] == 'v') { vocab_size = atoi(argv[i + 1]); }
+        else if (argv[i][1] == 'i') { prompt = argv[i + 1]; }
+        else if (argv[i][1] == 'z') { tokenizer_path = argv[i + 1]; }
+        else if (argv[i][1] == 'm') { mode = argv[i + 1]; }
+        else if (argv[i][1] == 'y') { system_prompt = argv[i + 1]; }
+        else { error_usage(); }
+    }
+
+    // parameter validation/overrides
+    if (rng_seed <= 0) rng_seed = (unsigned int)time(NULL);
+    if (temperature < 0.0) temperature = 0.0;
+    if (topp < 0.0 || 1.0 < topp) topp = 0.9;
+    if (steps < 0) steps = 0;
+
+    // build the Transformer via the model .bin file
+    Transformer transformer;
+    build_transformer(&transformer, checkpoint_path, vocab_size, steps);
+
+    // build the Tokenizer via the tokenizer .bin file
+    Tokenizer tokenizer;
+    build_tokenizer(&tokenizer, tokenizer_path, transformer.config.vocab_size);
+
+    // build the Sampler
+    Sampler sampler;
+    build_sampler(&sampler, transformer.config.vocab_size, temperature, topp, rng_seed);
+
+    // run!
+    if (strcmp(mode, "generate") == 0) {
+        generate(&transformer, &tokenizer, &sampler, prompt, steps);
+    } else if (strcmp(mode, "chat") == 0) {
+        chat(&transformer, &tokenizer, &sampler, prompt, system_prompt, steps);
+    } else {
+        fprintf(stderr, "unknown mode: %s\n", mode);
+        error_usage();
+    }
+
+    // memory and file handles cleanup
+    free_sampler(&sampler);
+    free_tokenizer(&tokenizer);
+    free_transformer(&transformer);
+    return 0;
+}
+#endif

From 226870ca4387c3d7294688e6e5234e1cc2bb6ca9 Mon Sep 17 00:00:00 2001
From: Michael Gschwind
Date: Sun, 12 May 2024 10:17:39 -0700
Subject: [PATCH 2/2] Minimal example

---
 parking_lot/unsupported/README.md | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 parking_lot/unsupported/README.md

diff --git a/parking_lot/unsupported/README.md b/parking_lot/unsupported/README.md
new file mode 100644
index 000000000..9961256c6
--- /dev/null
+++ b/parking_lot/unsupported/README.md
@@ -0,0 +1,28 @@
+# Enabling Models from Server to Mobile
+
+THIS DIRECTORY AND ITS SUBDIRECTORIES CONTAIN AN UNSUPPORTED EXAMPLE.
+
+This directory is a minimal example of integrating PyTorch models into
+a C/C++ app, exported either with AOT Inductor as a shared library,
+also known as a dynamic shared object (DSO), or with ExecuTorch as a
+PTE model file.
+
+The example is derived from Andrej Karpathy's llama2.c runner, as
+modified by Bert Maher for llama2.so, and distributed under Andrej's
+original license.
+
+Please refer to the documentation at
+https://github.com/karpathy/llama2.c (and Bert Maher's
+https://github.com/bertmaher/llama2.so for the modifications that let
+it serve as an execution environment for PyTorch models) for a
+discussion of downloading and preparing tokenizer models and invoking
+the model.
+
+This runner is limited to llama2-style models that use the
+SentencePiece tokenizer, keeping the example of how to enable an
+arbitrary application to call a PyTorch model in either DSO or PTE
+format as small as possible. In addition to header files, the changes
+comprise maintaining a pointer to the AOT Inductor or ExecuTorch
+runtime executor, the `forward()` function in runner/run.cpp, and the
+CMake files in runner-aoti and runner-et that build the runner against
+the AOT Inductor and ExecuTorch runtimes, respectively.
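
For orientation, here is a minimal, self-contained sketch of the AOTI call
path that runner/run.cpp wraps. It is an illustrative sketch rather than part
of the patch: the "model.so" path, the single-step (tokens, pos) to logits
signature, and the BOS token id of 1 are assumptions carried over from the
`forward()` and `generate()` functions above.

```cpp
// Sketch only: drive an AOTI-exported Llama-2 DSO directly, mirroring
// the __AOTI_MODEL__ branch of forward() in runner/run.cpp.
#include <torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h>
#include <torch/torch.h>

#include <iostream>
#include <vector>

int main() {
    // "model.so" is an assumed path to a DSO produced by AOT Inductor;
    // the two-argument constructor matches its use in build_transformer().
    torch::inductor::AOTIModelContainerRunnerCpu runner("model.so", 1);

    int64_t token_buffer[1] = {1}; // BOS token, as in generate()
    int64_t pos_buffer[1] = {0};   // position 0
    torch::Tensor tokens = torch::from_blob(token_buffer, {1, 1}, torch::kLong);
    torch::Tensor pos = torch::from_blob(pos_buffer, {1}, torch::kLong);

    // One forward step: the runner returns a vector of output tensors,
    // the first of which holds the next-token logits.
    std::vector<torch::Tensor> inputs{tokens, pos};
    torch::Tensor logits = runner.run(inputs)[0];

    // Greedy single-step decode, equivalent to sample_argmax() above.
    std::cout << "next token: " << logits.flatten().argmax().item<int64_t>() << "\n";
    return 0;
}
```

The ExecuTorch path is structured the same way: `build_transformer()`
constructs a Module from the PTE file, and `forward()` feeds it
EValue-wrapped ManagedTensors, as the __ET_MODEL__ branches of the patch show.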