From e94bd9c7b90541ac82a7ccc161914a87e61f73a0 Mon Sep 17 00:00:00 2001
From: Gary Linscott <glinscott@gmail.com>
Date: Sat, 18 Mar 2023 14:03:20 -0700
Subject: [PATCH 1/4] Compute perplexity over prompt

---
 main.cpp  | 64 ++++++++++++++++++++++++++++++++++++++++++++++++-------
 utils.cpp | 16 ++++++++------
 utils.h   |  2 ++
 3 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/main.cpp b/main.cpp
index c88405b82956a..c623b8b6195dd 100644
--- a/main.cpp
+++ b/main.cpp
@@ -547,7 +547,7 @@ bool llama_eval(
     static void * buf = malloc(buf_size);
 
     if (mem_per_token > 0 && mem_per_token*N > buf_size) {
-        const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
+        const size_t buf_size_new = 1.3*(mem_per_token*N); // add 30% to account for ggml object overhead
         //fprintf(stderr, "\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
 
         // reallocate
@@ -747,6 +747,49 @@ bool llama_eval(
     return true;
 }
 
+std::vector<double> softmax(const std::vector<float>& logits) {
+    std::vector<double> probs(logits.size());
+    float max_logit = logits[0];
+    for (float v : logits) max_logit = std::max(max_logit, v);
+    double sum_exp = 0.0;
+    for (size_t i = 0; i < logits.size(); i++) {
+        // Subtract the maximum logit value from the current logit value for numerical stability
+        float logit = logits[i] - max_logit;
+        double exp_logit = std::exp(logit);
+        sum_exp += exp_logit;
+        probs[i] = exp_logit;
+    }
+    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
+    return probs;
+}
+
+void perplexity(const gpt_vocab &vocab, const llama_model &model, const gpt_params &params, size_t mem_per_token) {
+    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
+    // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
+    // Output: `perplexity: 13.5106 [114/114]`
+    std::vector<gpt_vocab::id> tokens = ::llama_tokenize(vocab, params.prompt, true);
+
+    double nll = 0.0;
+    int seq_count = tokens.size() / params.n_ctx;
+    for (int i = 0; i < seq_count; ++i) {
+        int start = i * params.n_ctx;
+        int end = start + params.n_ctx - 1;
+        std::vector<gpt_vocab::id> embd(tokens.begin() + start, tokens.begin() + end);
+        std::vector<float> logits;
+        if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token)) {
+            fprintf(stderr, "Failed to predict\n");
+            return;
+        }
+        // Calculate probability of next token, given the previous ones.
+        double prob = softmax(logits)[tokens[end]];
+        nll += -std::log(prob);
+        // perplexity is e^(average negative log-likelihood)
+        printf("perplexity: %.4lf [%d/%d]    \r", std::exp(nll / (i + 1)), i + 1, seq_count);
+        fflush(stdout);
+    }
+    printf("\n");
+}
+
 static bool is_interacting = false;
 
 #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
@@ -815,7 +858,7 @@ int main(int argc, char ** argv) {
     // load the model
     {
         const int64_t t_start_us = ggml_time_us();
-        if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {  
+        if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
             fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
             return 1;
         }
@@ -830,13 +873,22 @@ int main(int argc, char ** argv) {
             params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
     }
 
+    std::vector<float> logits;
+
+    // determine the required inference memory per token:
+    size_t mem_per_token = 0;
+    llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
+
+    if (params.perplexity) {
+        perplexity(vocab, model, params, mem_per_token);
+        exit(0);
+    }
+
     int n_past = 0;
 
     int64_t t_sample_us = 0;
     int64_t t_predict_us = 0;
 
-    std::vector<float> logits;
-
     // Add a space in front of the first character to match OG llama tokenizer behavior
     params.prompt.insert(0, 1, ' ');
     // tokenize the prompt
@@ -881,10 +933,6 @@ int main(int argc, char ** argv) {
 
     std::vector<gpt_vocab::id> embd;
 
-    // determine the required inference memory per token:
-    size_t mem_per_token = 0;
-    llama_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
-
     int last_n_size = params.repeat_last_n;
     std::vector<gpt_vocab::id> last_n_tokens(last_n_size);
     std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
diff --git a/utils.cpp b/utils.cpp
index efa2e3c35f728..a04c47722bdfc 100644
--- a/utils.cpp
+++ b/utils.cpp
@@ -44,7 +44,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             std::copy(std::istreambuf_iterator<char>(file),
                       std::istreambuf_iterator<char>(),
                       back_inserter(params.prompt));
-
         } else if (arg == "-n" || arg == "--n_predict") {
             params.n_predict = std::stoi(argv[++i]);
         } else if (arg == "--top_k") {
@@ -72,6 +71,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.use_color = true;
         } else if (arg == "-r" || arg == "--reverse-prompt") {
            params.antiprompt = argv[++i];
+        } else if (arg == "--perplexity") {
+            params.perplexity = true;
         } else if (arg == "-h" || arg == "--help") {
             gpt_print_usage(argc, argv, params);
             exit(0);
@@ -109,6 +110,7 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  -c N, --ctx_size N    size of the prompt context (default: %d)\n", params.n_ctx);
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
     fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
+    fprintf(stderr, "  --perplexity          compute perplexity over the prompt\n");
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
     fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
     fprintf(stderr, "\n");
@@ -322,9 +324,9 @@ std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::st
     while (i > 0) {
         gpt_vocab::id token_id = prev[i];
         if (token_id == 0) {
-            // TODO: Return error or something more meaningful
-            printf("failed to tokenize string!\n");
-            break;
+            // TODO: Return error or something more meaningful
+            printf("failed to tokenize string at %d!\n", i);
+            break;
         }
         res.push_back(token_id);
         auto token = (*vocab.id_to_token.find(token_id)).second;
@@ -398,7 +400,7 @@ gpt_vocab::id llama_sample_top_p_top_k(
                 logits_id.push_back(std::make_pair(logits[i]*scale*repeat_penalty, i));
             } else {
                 logits_id.push_back(std::make_pair(logits[i]*scale/repeat_penalty, i));
-            } 
+            }
         } else {
             logits_id.push_back(std::make_pair(logits[i]*scale, i));
         }
@@ -527,7 +529,7 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t
 
     char * pdst = (char *) dst;
 
-    for (int j = 0; j < n; j += k) { 
+    for (int j = 0; j < n; j += k) {
         uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs);
         uint8_t * pm = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + sizeof(float));
         uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + 2*sizeof(float));
@@ -550,7 +552,7 @@ size_t ggml_quantize_q4_1(float * src, void * dst, int n, int k, int qk, int64_t
 
         *(float *) pd = d;
         *(float *) pm = min;
-        pd += bs; 
+        pd += bs;
         pm += bs;
 
         for (int l = 0; l < qk; l += 2) {
diff --git a/utils.h b/utils.h
index c1a8498a78d68..9684f766ce5b2 100644
--- a/utils.h
+++ b/utils.h
@@ -35,6 +35,8 @@ struct gpt_params {
     bool interactive = false; // interactive mode
     bool interactive_start = false; // reverse prompt immediately
     std::string antiprompt = ""; // string upon seeing which more user input is prompted
+
+    bool perplexity = false;
 };
 
 bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
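
Note on PATCH 1/4: perplexity is computed as e^(average negative log-likelihood), i.e. PPL = exp(-(1/N) * sum_i log p(token_i | preceding tokens)), and softmax() subtracts the max logit before exponentiating so std::exp() cannot overflow. Below is a minimal standalone C++ sketch of the same arithmetic, not part of the patch; the log_prob() helper and the toy logits are invented for the example. Working directly in log space like this also avoids taking std::log() of a vanishingly small probability.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Stable log-probability of one target token, equivalent to
    // std::log(softmax(logits)[target]) from the patch.
    double log_prob(const std::vector<float> & logits, int target) {
        float max_logit = *std::max_element(logits.begin(), logits.end());
        double sum_exp = 0.0;
        for (float l : logits) sum_exp += std::exp(l - max_logit);
        return (logits[target] - max_logit) - std::log(sum_exp);
    }

    int main() {
        // Toy data: three prediction steps over a 4-token vocabulary.
        std::vector<std::vector<float>> steps = {
            {2.0f, 0.5f, -1.0f, 0.0f}, {0.1f, 3.0f, 0.2f, -0.5f}, {1.0f, 1.0f, 1.0f, 1.0f}};
        std::vector<int> targets = {0, 1, 3};
        double nll = 0.0;
        for (size_t i = 0; i < targets.size(); i++) nll += -log_prob(steps[i], targets[i]);
        printf("perplexity: %.4lf\n", std::exp(nll / targets.size()));
        return 0;
    }
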
From 91d71fe0c109227debe86536899caf2b5b2235c3 Mon Sep 17 00:00:00 2001
From: Gary Linscott <glinscott@gmail.com>
Date: Sun, 19 Mar 2023 13:33:12 -0700
Subject: [PATCH 2/4] More accurate perplexity calculation - over all logits in
 the context window (so 512x more tokens!)

---
 main.cpp | 44 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/main.cpp b/main.cpp
index c623b8b6195dd..a7940d08849c3 100644
--- a/main.cpp
+++ b/main.cpp
@@ -527,7 +527,8 @@ bool llama_eval(
         const int n_past,
         const std::vector<gpt_vocab::id> & embd_inp,
         std::vector<float> & embd_w,
-        size_t & mem_per_token) {
+        size_t & mem_per_token,
+        bool return_all_logits = false) {
     const int N = embd_inp.size();
 
     const auto & hparams = model.hparams;
@@ -733,9 +734,14 @@ bool llama_eval(
     //embd_w.resize(n_vocab*N);
     //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
 
-    // return result for just the last token
-    embd_w.resize(n_vocab);
-    memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+    if (return_all_logits) {
+        embd_w.resize(n_vocab * N);
+        memcpy(embd_w.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+    } else {
+        // return result for just the last token
+        embd_w.resize(n_vocab);
+        memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+    }
 
     if (mem_per_token == 0) {
         mem_per_token = ggml_used_mem(ctx0)/N;
@@ -769,6 +775,7 @@ void perplexity(const gpt_vocab &vocab, const llama_model &model, const gpt_para
     // Output: `perplexity: 13.5106 [114/114]`
     std::vector<gpt_vocab::id> tokens = ::llama_tokenize(vocab, params.prompt, true);
 
+    int count = 0;
     double nll = 0.0;
     int seq_count = tokens.size() / params.n_ctx;
     for (int i = 0; i < seq_count; ++i) {
@@ -776,15 +783,34 @@ void perplexity(const gpt_vocab &vocab, const llama_model &model, const gpt_para
         int end = start + params.n_ctx - 1;
         std::vector<gpt_vocab::id> embd(tokens.begin() + start, tokens.begin() + end);
         std::vector<float> logits;
-        if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token)) {
+        if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token, true)) {
             fprintf(stderr, "Failed to predict\n");
             return;
         }
-        // Calculate probability of next token, given the previous ones.
-        double prob = softmax(logits)[tokens[end]];
-        nll += -std::log(prob);
+        // We get the logits for all the tokens in the context window (params.n_ctx)
+        // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
+        // calculate the perplexity over the last half of the window (so the model always has
+        // some context to predict the token).
+        //
+        // We rely on the fact that attention in the forward pass only looks at previous
+        // tokens here, so the logits returned for each token are an accurate representation
+        // of what the model would have predicted at that point.
+        //
+        // For example, with a context window of 512, we will compute perplexity for each of the
+        // last 256 tokens. Then, we split the input up into context window size chunks to
+        // process the entire prompt.
+        for (int j = params.n_ctx / 2; j < params.n_ctx - 1; ++j) {
+            // Calculate probability of next token, given the previous ones.
+            int n_vocab = model.hparams.n_vocab;
+            std::vector<float> tok_logits(
+                logits.begin() + j * n_vocab,
+                logits.begin() + (j + 1) * n_vocab);
+            double prob = softmax(tok_logits)[tokens[start + j + 1]];
+            nll += -std::log(prob);
+            ++count;
+        }
         // perplexity is e^(average negative log-likelihood)
-        printf("perplexity: %.4lf [%d/%d]    \r", std::exp(nll / (i + 1)), i + 1, seq_count);
+        printf("perplexity: %.4lf [%d/%d]    \r", std::exp(nll / count), i + 1, seq_count);
         fflush(stdout);
     }
     printf("\n");
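
Note on PATCH 2/4: with return_all_logits set, llama_eval() fills embd_w with N rows of n_vocab floats, where row j is the model's next-token distribution after seeing tokens 0..j of the chunk. Because causal attention only looks backwards, a single forward pass yields a valid prediction at every position, and scoring starts at j = n_ctx/2 so every scored token is conditioned on at least half a window of context (the fixed-window approximation from the linked Hugging Face article). A minimal sketch of just the row indexing, mirroring the tok_logits slice in the patch (the function name and parameters are invented for illustration):

    #include <cstddef>
    #include <vector>

    // Extract the logits row for position j from the flattened [N x n_vocab]
    // buffer; softmax of this row scores the token at position j + 1.
    std::vector<float> logits_for_position(const std::vector<float> & all_logits,
                                           int n_vocab, int j) {
        return std::vector<float>(all_logits.begin() + (size_t) j * n_vocab,
                                  all_logits.begin() + (size_t) (j + 1) * n_vocab);
    }
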
From 2f8ab68d72ed5005ef6c80e65b25bd5a231543d7 Mon Sep 17 00:00:00 2001
From: Gary Linscott <glinscott@gmail.com>
Date: Tue, 21 Mar 2023 07:10:42 -0700
Subject: [PATCH 3/4] Output all perplexities

---
 main.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/main.cpp b/main.cpp
index cb799fdba0f5e..e77007943fdb5 100644
--- a/main.cpp
+++ b/main.cpp
@@ -795,6 +795,7 @@ void perplexity(const gpt_vocab &vocab, const llama_model &model, const gpt_para
     int count = 0;
     double nll = 0.0;
     int seq_count = tokens.size() / params.n_ctx;
+    printf("Calculating perplexity over %d chunks\n", seq_count);
     for (int i = 0; i < seq_count; ++i) {
         int start = i * params.n_ctx;
         int end = start + params.n_ctx - 1;
@@ -827,7 +828,7 @@ void perplexity(const gpt_vocab &vocab, const llama_model &model, const gpt_para
             ++count;
         }
         // perplexity is e^(average negative log-likelihood)
-        printf("perplexity: %.4lf [%d/%d]    \r", std::exp(nll / count), i + 1, seq_count);
+        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
         fflush(stdout);
     }
     printf("\n");
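
Note on PATCH 3/4: nll and count keep accumulating across chunks, so each printed "[i]value" is the running perplexity over all tokens scored so far, not the perplexity of chunk i in isolation; the last value printed is the final result. If isolated per-chunk numbers were wanted instead, the accumulators would have to be snapshotted around each chunk, along these lines (a hypothetical variant, not what the patch does):

    #include <cmath>
    #include <cstdio>

    // Hypothetical helper: perplexity of a single chunk, computed from the
    // accumulator values before and after that chunk's scoring loop.
    void print_chunk_perplexity(int i, double nll_before, double nll_after,
                                int count_before, int count_after) {
        double chunk_nll = nll_after - nll_before;
        int    chunk_count = count_after - count_before;
        printf("[%d]%.4lf,", i + 1, std::exp(chunk_nll / chunk_count));
    }
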
From 35ae689f78330c161cad8de63d76c4ac9e120b4d Mon Sep 17 00:00:00 2001
From: Gary Linscott <glinscott@gmail.com>
Date: Tue, 21 Mar 2023 07:29:23 -0700
Subject: [PATCH 4/4] Add timing/ETA

---
 main.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/main.cpp b/main.cpp
index e77007943fdb5..6e42894e43ca0 100644
--- a/main.cpp
+++ b/main.cpp
@@ -801,10 +801,16 @@ void perplexity(const gpt_vocab &vocab, const llama_model &model, const gpt_para
         int end = start + params.n_ctx - 1;
         std::vector<gpt_vocab::id> embd(tokens.begin() + start, tokens.begin() + end);
         std::vector<float> logits;
+        auto start_t = std::chrono::high_resolution_clock::now();
         if (!llama_eval(model, params.n_threads, 0, embd, logits, mem_per_token, true)) {
             fprintf(stderr, "Failed to predict\n");
             return;
         }
+        auto end_t = std::chrono::high_resolution_clock::now();
+        if (i == 0) {
+            double seconds = std::chrono::duration<double>(end_t - start_t).count();
+            printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
+        }
         // We get the logits for all the tokens in the context window (params.n_ctx)
         // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
         // calculate the perplexity over the last half of the window (so the model always has
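
Note on PATCH 4/4: the ETA simply extrapolates the wall time of the first chunk, seconds * seq_count, converted to hours by dividing by 3600. Since every pass evaluates an identically sized n_ctx chunk, per-pass time is nearly constant, so the single sample is a reasonable estimate. A self-contained check of the formula (the 30 s per pass and 650 chunks are made-up numbers, not measurements):

    #include <cstdio>

    int main() {
        double seconds   = 30.0;  // hypothetical time for one n_ctx-sized pass
        int    seq_count = 650;   // hypothetical number of chunks in the prompt
        printf("%.2f seconds per pass - ETA %.2f hours\n",
               seconds, (seconds * seq_count) / (60.0 * 60.0)); // -> ETA 5.42 hours
        return 0;
    }
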