From 5f022185a1170b2cf1eec37dd07b6db8638a5d88 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Sat, 5 Aug 2023 22:39:44 -0400 Subject: [PATCH 01/14] test pp_threads --- llama.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 839739870eb3e..a44f648df77a0 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1814,9 +1814,12 @@ static bool llama_eval_internal( // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); + int32_t pp_threads = 3; + // for big prompts, if BLAS is enabled, it is better to use only one thread // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance - n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads; + pp_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : pp_threads; + n_threads = N > 1 ? pp_threads : n_threads; struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; From 1de711d4f8c50dfff2c797c309c7be8d1127b981 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Sat, 5 Aug 2023 23:45:58 -0400 Subject: [PATCH 02/14] builds fine --- examples/common.cpp | 12 ++++++++++++ examples/common.h | 1 + examples/embd-input/embd-input-lib.cpp | 4 ++-- examples/embedding/embedding.cpp | 2 +- examples/main/main.cpp | 8 ++++---- examples/perplexity/perplexity.cpp | 4 ++-- examples/server/server.cpp | 2 +- examples/simple/simple.cpp | 2 +- llama.cpp | 19 +++++++++++-------- llama.h | 7 +++++-- 10 files changed, 40 insertions(+), 21 deletions(-) diff --git a/examples/common.cpp b/examples/common.cpp index 21f4a0357d422..7502c87ea1911 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -119,6 +119,15 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { if (params.n_threads <= 0) { params.n_threads = std::thread::hardware_concurrency(); } + } else if (arg == "-ppt" || arg == "--pp-threads") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.pp_threads = std::stoi(argv[i]); + if (params.pp_threads <= 0) { + params.pp_threads = params.n_threads; + } } else if (arg == "-p" || arg == "--prompt") { if (++i >= argc) { invalid_param = true; @@ -524,6 +533,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stdout, " --color colorise output to distinguish prompt and user input from generations\n"); fprintf(stdout, " -s SEED, --seed SEED RNG seed (default: -1, use random seed for < 0)\n"); fprintf(stdout, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stdout, " -ppt N, --pp-threads N\n"); + fprintf(stdout, " number of threads to use during prompt processing (default is equal to --threads)\n"); fprintf(stdout, " -p PROMPT, --prompt PROMPT\n"); fprintf(stdout, " prompt to start generation with (default: empty)\n"); fprintf(stdout, " -e process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n"); @@ -657,6 +668,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param lparams.embedding = params.embedding; lparams.rope_freq_base = params.rope_freq_base; lparams.rope_freq_scale = params.rope_freq_scale; + lparams.pp_threads = params.pp_threads; return lparams; } diff --git a/examples/common.h b/examples/common.h index 
375bc0a3db416..d59861aa0b255 100644 --- a/examples/common.h +++ b/examples/common.h @@ -19,6 +19,7 @@ int32_t get_num_physical_cores(); struct gpt_params { uint32_t seed = -1; // RNG seed int32_t n_threads = get_num_physical_cores(); + int32_t pp_threads = get_num_physical_cores(); int32_t n_predict = -1; // new tokens to predict int32_t n_ctx = 512; // context size int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) diff --git a/examples/embd-input/embd-input-lib.cpp b/examples/embd-input/embd-input-lib.cpp index 2185b9b0e2839..0480eee190294 100644 --- a/examples/embd-input/embd-input-lib.cpp +++ b/examples/embd-input/embd-input-lib.cpp @@ -83,7 +83,7 @@ bool eval_float(void * model, float * input, int N){ if (n_eval > n_batch) { n_eval = n_batch; } - if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads)) { + if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; } @@ -104,7 +104,7 @@ bool eval_tokens(void * model, std::vector tokens) { if (n_eval > params.n_batch) { n_eval = params.n_batch; } - if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads)) { + if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; } diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 5192d6df5c2f8..e931d7f39e3ee 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -74,7 +74,7 @@ int main(int argc, char ** argv) { if (params.embedding){ if (embd_inp.size() > 0) { - if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) { + if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 56ada7e69d99d..dba3bdab2a945 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -144,7 +144,7 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx); const std::vector tmp(params.n_batch, llama_token_bos()); - llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads); + llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads, params.pp_threads); } llama_print_timings(ctx); @@ -406,7 +406,7 @@ int main(int argc, char ** argv) { // do one empty run to warm up the model { const std::vector tmp = { llama_token_bos(), }; - llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); + llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads, params.pp_threads); llama_reset_timings(ctx); } @@ -509,7 +509,7 @@ int main(int argc, char ** argv) { for (int i = 0; i < input_size; i += params.n_batch) { int n_eval = std::min(input_size - i, params.n_batch); - if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, params.n_threads)) { + if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } @@ -523,7 +523,7 @@ int main(int argc, char ** argv) { if (n_eval > params.n_batch) { n_eval = params.n_batch; } - if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) { + if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads, 
params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 62433e983df9c..f75b1ec41e34e 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -66,7 +66,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) { tokens[batch_start] = llama_token_bos(); } - if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) { + if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return; } @@ -233,7 +233,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) { } // Evaluate the query - if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads)) { + if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return; } diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 6f7a66da108c8..853cd74cdf3f0 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -350,7 +350,7 @@ struct llama_server_context { n_eval = params.n_batch; } - if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads)) + if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.pp_threads)) { LOG_ERROR("failed to eval", { {"n_eval", n_eval}, diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 97137a6584aa3..5fc6074bccfea 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -123,7 +123,7 @@ int main(int argc, char ** argv) // Evaluate the tokens : //--------------------------------- - if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) ) + if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads , params.pp_threads ) ) { fprintf( stderr, "%s : failed to eval\n" , __func__ ); return 1; diff --git a/llama.cpp b/llama.cpp index a44f648df77a0..630a4357abae7 100644 --- a/llama.cpp +++ b/llama.cpp @@ -895,6 +895,7 @@ struct llama_context_params llama_context_default_params() { /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS, /*.gpu_layers =*/ 0, /*.main_gpu =*/ 0, + /*.pp_threads =*/ GGML_DEFAULT_N_THREADS, /*.tensor_split =*/ nullptr, /*.rope_freq_base =*/ 10000.0f, /*.rope_freq_scale =*/ 1.0f, @@ -1772,6 +1773,7 @@ static struct ggml_cgraph * llama_build_graph( // - n_tokens number of tokens // - n_past: the context size so far // - n_threads: number of threads to use +// - pp_threads: number of threads to use for prompt processing // static bool llama_eval_internal( llama_context & lctx, @@ -1780,6 +1782,7 @@ static bool llama_eval_internal( int n_tokens, int n_past, int n_threads, + int pp_threads, const char * cgraph_fname) { LLAMA_ASSERT((!tokens && embd) || (tokens && !embd)); @@ -1814,8 +1817,6 @@ static bool llama_eval_internal( // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); - int32_t pp_threads = 3; - // for big prompts, if BLAS is enabled, it is better to use only one thread // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance pp_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 
1 : pp_threads; @@ -3365,7 +3366,7 @@ struct llama_context * llama_new_context_with_model( if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { // Enter a blocking eval loop with dummy input, letting rank=0 drive the process const std::vector tmp(ctx->model.hparams.n_ctx, llama_token_bos()); - while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; + while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0, 0)) {}; llama_backend_free(); exit(1); } @@ -4057,8 +4058,9 @@ int llama_eval( const llama_token * tokens, int n_tokens, int n_past, - int n_threads) { - if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) { + int n_threads, + int pp_threads) { + if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, pp_threads, nullptr)) { fprintf(stderr, "%s: failed to eval\n", __func__); return 1; } @@ -4079,8 +4081,9 @@ int llama_eval_embd( const float * embd, int n_tokens, int n_past, - int n_threads) { - if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) { + int n_threads, + int pp_threads) { + if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, pp_threads, nullptr)) { fprintf(stderr, "%s: failed to eval\n", __func__); return 1; } @@ -4101,7 +4104,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) { const std::vector tmp(n_batch, llama_token_bos()); - if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) { + if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, 1, fname)) { fprintf(stderr, "%s: failed to eval\n", __func__); return 1; } diff --git a/llama.h b/llama.h index fa1977f2d9492..0bbdd6759921f 100644 --- a/llama.h +++ b/llama.h @@ -94,6 +94,7 @@ extern "C" { float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams) int32_t n_gpu_layers; // number of layers to store in VRAM int32_t main_gpu; // the GPU that is used for scratch and small tensors + int32_t pp_threads; // number of threads used for prompt processing only const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES) @@ -291,7 +292,8 @@ extern "C" { const llama_token * tokens, int n_tokens, int n_past, - int n_threads); + int n_threads, + int pp_threads); // Same as llama_eval, but use float matrix input directly. 
LLAMA_API int llama_eval_embd( @@ -299,7 +301,8 @@ extern "C" { const float * embd, int n_tokens, int n_past, - int n_threads); + int n_threads, + int pp_threads); // Export a static computation graph for context of 511 and batch size of 1 // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these From 590feeac1d78a4da3bb0cc68040f5e52a909063e Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 6 Aug 2023 00:13:02 -0400 Subject: [PATCH 03/14] add printout of pp_threads --- examples/common.cpp | 2 +- examples/main/main.cpp | 4 ++-- examples/save-load-state/save-load-state.cpp | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/common.cpp b/examples/common.cpp index 7502c87ea1911..c531639fb0161 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -534,7 +534,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stdout, " -s SEED, --seed SEED RNG seed (default: -1, use random seed for < 0)\n"); fprintf(stdout, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); fprintf(stdout, " -ppt N, --pp-threads N\n"); - fprintf(stdout, " number of threads to use during prompt processing (default is equal to --threads)\n"); + fprintf(stdout, " number of threads to use during prompt processing (default: %d)\n", params.pp_threads); fprintf(stdout, " -p PROMPT, --prompt PROMPT\n"); fprintf(stdout, " prompt to start generation with (default: empty)\n"); fprintf(stdout, " -e process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n"); diff --git a/examples/main/main.cpp b/examples/main/main.cpp index dba3bdab2a945..59fc8a295b914 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -133,8 +133,8 @@ int main(int argc, char ** argv) { // print system information { fprintf(stderr, "\n"); - fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", - params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info()); + fprintf(stderr, "system_info: n_threads = %d / %d | pp_threads = %d / %d | %s\n", + params.n_threads, std::thread::hardware_concurrency(), params.pp_threads, std::thread::hardware_concurrency(), llama_print_system_info()); } // determine the maximum memory usage needed to do inference for the given n_batch and n_ctx parameters diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 61c71c3589fdf..4821fad5df66d 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -56,7 +56,7 @@ int main(int argc, char ** argv) { } // evaluate prompt - llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads); + llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.pp_threads); last_n_tokens_data.insert(last_n_tokens_data.end(), tokens.data(), tokens.data() + n_prompt_tokens); n_past += n_prompt_tokens; @@ -93,7 +93,7 @@ int main(int argc, char ** argv) { last_n_tokens_data.push_back(next_token); printf("%s", next_token_str); - if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads)) { + if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_free(ctx); llama_free_model(model); @@ -153,7 +153,7 @@ int main(int argc, char ** argv) { last_n_tokens_data.push_back(next_token); printf("%s", next_token_str); - if 
(llama_eval(ctx2, &next_token, 1, n_past, params.n_threads)) { + if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_free(ctx2); llama_free_model(model); From 215e2f21d016912ed8a326d607222bf280d158c8 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 6 Aug 2023 00:22:14 -0400 Subject: [PATCH 04/14] only activate pp_threads for main for now --- examples/embd-input/embd-input-lib.cpp | 4 ++-- examples/embedding/embedding.cpp | 2 +- examples/perplexity/perplexity.cpp | 4 ++-- examples/save-load-state/save-load-state.cpp | 6 +++--- examples/server/server.cpp | 2 +- examples/simple/simple.cpp | 2 +- llama.cpp | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/examples/embd-input/embd-input-lib.cpp b/examples/embd-input/embd-input-lib.cpp index 0480eee190294..d5df68637f4ec 100644 --- a/examples/embd-input/embd-input-lib.cpp +++ b/examples/embd-input/embd-input-lib.cpp @@ -83,7 +83,7 @@ bool eval_float(void * model, float * input, int N){ if (n_eval > n_batch) { n_eval = n_batch; } - if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads, params.pp_threads)) { + if (llama_eval_embd(ctx, (input+i*n_emb), n_eval, n_past, params.n_threads, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; } @@ -104,7 +104,7 @@ bool eval_tokens(void * model, std::vector tokens) { if (n_eval > params.n_batch) { n_eval = params.n_batch; } - if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads, params.pp_threads)) { + if (llama_eval(ctx, &tokens[i], n_eval, n_past, params.n_threads, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return false; } diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index e931d7f39e3ee..6f732ce46bfbe 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -74,7 +74,7 @@ int main(int argc, char ** argv) { if (params.embedding){ if (embd_inp.size() > 0) { - if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.pp_threads)) { + if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index f75b1ec41e34e..7ed661ec5fd79 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -66,7 +66,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) { tokens[batch_start] = llama_token_bos(); } - if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.pp_threads)) { + if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return; } @@ -233,7 +233,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) { } // Evaluate the query - if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads, params.pp_threads)) { + if (llama_eval(ctx, query_embd.data(), query_embd.size(), 0, params.n_threads, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return; } diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 4821fad5df66d..c8586718e13a2 100644 --- 
a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -56,7 +56,7 @@ int main(int argc, char ** argv) { } // evaluate prompt - llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.pp_threads); + llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.n_threads); last_n_tokens_data.insert(last_n_tokens_data.end(), tokens.data(), tokens.data() + n_prompt_tokens); n_past += n_prompt_tokens; @@ -93,7 +93,7 @@ int main(int argc, char ** argv) { last_n_tokens_data.push_back(next_token); printf("%s", next_token_str); - if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.pp_threads)) { + if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.n_threads)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_free(ctx); llama_free_model(model); @@ -153,7 +153,7 @@ int main(int argc, char ** argv) { last_n_tokens_data.push_back(next_token); printf("%s", next_token_str); - if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.pp_threads)) { + if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.n_threads)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_free(ctx2); llama_free_model(model); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 853cd74cdf3f0..10b666c1c1cb4 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -350,7 +350,7 @@ struct llama_server_context { n_eval = params.n_batch; } - if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.pp_threads)) + if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.n_threads)) { LOG_ERROR("failed to eval", { {"n_eval", n_eval}, diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 5fc6074bccfea..f093da32a3789 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -123,7 +123,7 @@ int main(int argc, char ** argv) // Evaluate the tokens : //--------------------------------- - if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads , params.pp_threads ) ) + if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads , params.n_threads ) ) { fprintf( stderr, "%s : failed to eval\n" , __func__ ); return 1; diff --git a/llama.cpp b/llama.cpp index 630a4357abae7..97c5e0cb0a936 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1782,7 +1782,7 @@ static bool llama_eval_internal( int n_tokens, int n_past, int n_threads, - int pp_threads, + int pp_threads, const char * cgraph_fname) { LLAMA_ASSERT((!tokens && embd) || (tokens && !embd)); From ce6d86ec4116aee90c4369419c036ca15ad67e10 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 6 Aug 2023 00:40:13 -0400 Subject: [PATCH 05/14] fix --- examples/common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/common.cpp b/examples/common.cpp index c531639fb0161..031868d90769e 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -126,7 +126,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { } params.pp_threads = std::stoi(argv[i]); if (params.pp_threads <= 0) { - params.pp_threads = params.n_threads; + params.pp_threads = std::thread::hardware_concurrency(); } } else if (arg == "-p" || arg == "--prompt") { if (++i >= argc) { From 
0480362f12298c7e62db3137144a1de720f2a8b3 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Sun, 6 Aug 2023 00:44:29 -0400 Subject: [PATCH 06/14] remove from llama_context_params --- examples/common.cpp | 1 - llama.cpp | 1 - llama.h | 1 - 3 files changed, 3 deletions(-) diff --git a/examples/common.cpp b/examples/common.cpp index 031868d90769e..46ceef966c6b3 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -668,7 +668,6 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param lparams.embedding = params.embedding; lparams.rope_freq_base = params.rope_freq_base; lparams.rope_freq_scale = params.rope_freq_scale; - lparams.pp_threads = params.pp_threads; return lparams; } diff --git a/llama.cpp b/llama.cpp index 97c5e0cb0a936..01c904a3489ff 100644 --- a/llama.cpp +++ b/llama.cpp @@ -895,7 +895,6 @@ struct llama_context_params llama_context_default_params() { /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS, /*.gpu_layers =*/ 0, /*.main_gpu =*/ 0, - /*.pp_threads =*/ GGML_DEFAULT_N_THREADS, /*.tensor_split =*/ nullptr, /*.rope_freq_base =*/ 10000.0f, /*.rope_freq_scale =*/ 1.0f, diff --git a/llama.h b/llama.h index 0bbdd6759921f..c57e846546fcd 100644 --- a/llama.h +++ b/llama.h @@ -94,7 +94,6 @@ extern "C" { float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams) int32_t n_gpu_layers; // number of layers to store in VRAM int32_t main_gpu; // the GPU that is used for scratch and small tensors - int32_t pp_threads; // number of threads used for prompt processing only const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES) From d854348992fe5041332d261f75e9a585b7fd3e1b Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Tue, 8 Aug 2023 21:30:12 -0400 Subject: [PATCH 07/14] perplexity only uses pp_threads --- examples/perplexity/perplexity.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 7ed661ec5fd79..534b0ae3fe4a7 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -66,7 +66,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) { tokens[batch_start] = llama_token_bos(); } - if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.n_threads)) { + if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return; } @@ -337,8 +337,8 @@ int main(int argc, char ** argv) { // print system information { fprintf(stderr, "\n"); - fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", - params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info()); + fprintf(stderr, "system_info: pp_threads = %d / %d | %s\n", + params.pp_threads, std::thread::hardware_concurrency(), llama_print_system_info()); } if (params.hellaswag) { From be26777a6aee651131c7cca2e0aa3e2b9a14a772 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Tue, 8 Aug 2023 22:19:59 -0400 Subject: [PATCH 08/14] add pp_threads support to other files --- examples/embedding/embedding.cpp | 6 +++--- examples/save-load-state/save-load-state.cpp | 7 ++++--- examples/server/server.cpp | 14 +++++++++++++- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git 
a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 6f732ce46bfbe..58fa2753edd45 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -50,8 +50,8 @@ int main(int argc, char ** argv) { // print system information { fprintf(stderr, "\n"); - fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", - params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info()); + fprintf(stderr, "system_info: n_threads = %d / %d | pp_threads = %d / %d | %s\n", + params.n_threads, std::thread::hardware_concurrency(), params.pp_threads, std::thread::hardware_concurrency(), llama_print_system_info()); } int n_past = 0; @@ -74,7 +74,7 @@ int main(int argc, char ** argv) { if (params.embedding){ if (embd_inp.size() > 0) { - if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.n_threads)) { + if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index c8586718e13a2..2a466c2e20fac 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -10,6 +10,7 @@ int main(int argc, char ** argv) { gpt_params params; params.seed = 42; params.n_threads = 4; + params.pp_threads = 4; params.repeat_last_n = 64; params.prompt = "The quick brown fox"; @@ -56,7 +57,7 @@ int main(int argc, char ** argv) { } // evaluate prompt - llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.n_threads); + llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past, params.n_threads, params.pp_threads); last_n_tokens_data.insert(last_n_tokens_data.end(), tokens.data(), tokens.data() + n_prompt_tokens); n_past += n_prompt_tokens; @@ -93,7 +94,7 @@ int main(int argc, char ** argv) { last_n_tokens_data.push_back(next_token); printf("%s", next_token_str); - if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.n_threads)) { + if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_free(ctx); llama_free_model(model); @@ -153,7 +154,7 @@ int main(int argc, char ** argv) { last_n_tokens_data.push_back(next_token); printf("%s", next_token_str); - if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.n_threads)) { + if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads, params.pp_threads)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_free(ctx2); llama_free_model(model); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index fe91b2343316c..d86bab5b3c96b 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -382,7 +382,7 @@ struct llama_server_context { n_eval = params.n_batch; } - if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.n_threads)) + if (llama_eval(ctx, &embd[n_past], n_eval, n_past, params.n_threads, params.pp_threads)) { LOG_ERROR("failed to eval", { {"n_eval", n_eval}, @@ -648,6 +648,8 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, fprintf(stdout, " -h, --help show this help message and exit\n"); fprintf(stdout, " -v, --verbose verbose output (default: %s)\n", server_verbose ? 
"enabled" : "disabled"); fprintf(stdout, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stdout, " -ppt N, --pp-threads N\n"); + fprintf(stdout, " number of threads to use during prompt processing (default: %d)\n", params.pp_threads); fprintf(stdout, " -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); fprintf(stdout, " -gqa N, --gqa N grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa); fprintf(stdout, " -eps N, --rms-norm-eps N rms norm eps (TEMP!!! use 1e-5 for LLaMAv2) (default: %.1e)\n", params.rms_norm_eps); @@ -818,6 +820,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, } params.n_threads = std::stoi(argv[i]); } + else if (arg == "-ppt" || arg == "--pp-threads") + { + if (++i >= argc) + { + invalid_param = true; + break; + } + params.pp_threads = std::stoi(argv[i]); + } else if (arg == "-b" || arg == "--batch-size") { if (++i >= argc) @@ -1178,6 +1189,7 @@ int main(int argc, char **argv) {"commit", BUILD_COMMIT}}); LOG_INFO("system info", { {"n_threads", params.n_threads}, + {"pp_threads", params.pp_threads}, {"total_threads", std::thread::hardware_concurrency()}, {"system_info", llama_print_system_info()}, }); From 193f295a3a6d7144d59449b762ad97feb6d6e248 Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Tue, 8 Aug 2023 22:47:34 -0400 Subject: [PATCH 09/14] Update llama.cpp --- llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index f58ca74873439..fbe04958bdf66 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1771,7 +1771,7 @@ static struct ggml_cgraph * llama_build_graph( // - embd embeddings input // - n_tokens number of tokens // - n_past: the context size so far -// - n_threads: number of threads to use +// - n_threads: number of threads to use for inference // - pp_threads: number of threads to use for prompt processing // static bool llama_eval_internal( From 3919e67421f6e7b7b87238add37b47f185080d51 Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Tue, 8 Aug 2023 22:58:46 -0400 Subject: [PATCH 10/14] Update README.md --- examples/server/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/server/README.md b/examples/server/README.md index e56ca063a9f0e..2548be1306525 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -5,6 +5,7 @@ This example demonstrates a simple HTTP API server and a simple web front end to Command line options: - `--threads N`, `-t N`: Set the number of threads to use during computation. +- `-ppt N`, `--pp-threads N`: Set the number of threads to use during prompt processing only. - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`). - `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses. - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096. 
From 49f0bfd69ddf801bb8ce5c2e506c2f0a22844bfa Mon Sep 17 00:00:00 2001 From: Eve <139727413+netrunnereve@users.noreply.github.com> Date: Tue, 8 Aug 2023 22:58:53 -0400 Subject: [PATCH 11/14] Update README.md --- examples/main/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/main/README.md b/examples/main/README.md index 55c16096f03b7..a78fb6b945d6e 100644 --- a/examples/main/README.md +++ b/examples/main/README.md @@ -259,6 +259,7 @@ These options help improve the performance and memory usage of the LLaMA models. ### Number of Threads - `-t N, --threads N`: Set the number of threads to use during computation. For optimal performance, it is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). Using the correct number of threads can greatly improve performance. +- `-ppt N, --pp-threads N`: Set the number of threads to use during prompt processing only. ### Mlock From 1c154e9ea5a7cfe65fb1ce6908501b957fba9926 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Fri, 18 Aug 2023 17:49:04 -0400 Subject: [PATCH 12/14] lazy fix for llama-bench (runs without pp_threads support) --- examples/llama-bench/llama-bench.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 266c8eab3b2f6..d69dcaf1a06b6 100755 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -853,7 +853,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat int n_processed = 0; while (n_processed < n_prompt) { int n_tokens = std::min(n_prompt - n_processed, n_batch); - llama_eval(ctx, tokens.data(), n_tokens, n_past + n_processed, n_threads); + llama_eval(ctx, tokens.data(), n_tokens, n_past + n_processed, n_threads, n_threads); n_processed += n_tokens; } } @@ -861,7 +861,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) { llama_token token = llama_token_bos(); for (int i = 0; i < n_gen; i++) { - llama_eval(ctx, &token, 1, n_past + i, n_threads); + llama_eval(ctx, &token, 1, n_past + i, n_threads, n_threads); } } From 471e469ae28cf4f98015f909e8fbcd043df360f4 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Wed, 23 Aug 2023 23:53:06 -0400 Subject: [PATCH 13/14] pre gguf merge --- examples/perplexity/perplexity.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 9dbd6b328a3e3..51bb5267cd6c9 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -130,7 +130,7 @@ std::vector hellaswag_evaluate_tokens(llama_context * ctx, const std::vec for (size_t i_chunk = 0; i_chunk < n_chunk; ++i_chunk) { size_t n_tokens = tokens.size() - i_chunk * n_batch; n_tokens = std::min(n_tokens, size_t(n_batch)); - if (llama_eval(ctx, tokens.data() + i_chunk * n_batch, n_tokens, n_past, n_thread)) { + if (llama_eval(ctx, tokens.data() + i_chunk * n_batch, n_tokens, n_past, n_thread, n_thread)) { fprintf(stderr, "%s : failed to eval\n", __func__); return {}; } @@ -304,7 +304,7 @@ void hellaswag_score(llama_context * ctx, const gpt_params & params) { //} // Evaluate the query - logits = hellaswag_evaluate_tokens(ctx, query_embd, context_size, params.n_batch, 
n_vocab, params.n_threads, params.pp_threads); + logits = hellaswag_evaluate_tokens(ctx, query_embd, context_size, params.n_batch, n_vocab, params.n_threads); if (logits.empty()) { fprintf(stderr, "%s : failed to eval\n", __func__); return; From 8209b5d6a2349baf7c6f6682ff50ca6a26b52c54 Mon Sep 17 00:00:00 2001 From: netrunnereve <139727413+netrunnereve@users.noreply.github.com> Date: Thu, 24 Aug 2023 20:26:19 -0400 Subject: [PATCH 14/14] revert llama_eval, create main example --- examples/main/main.cpp | 10 ++++++---- llama.cpp | 19 +++++++------------ llama.h | 6 ++---- 3 files changed, 15 insertions(+), 20 deletions(-) diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 887c75561c64a..e0db33a370975 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -144,7 +144,7 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx); const std::vector tmp(params.n_batch, llama_token_bos()); - llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads, params.pp_threads); + llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads); } llama_print_timings(ctx); @@ -406,7 +406,7 @@ int main(int argc, char ** argv) { // do one empty run to warm up the model { const std::vector tmp = { llama_token_bos(), }; - llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads, params.pp_threads); + llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); llama_reset_timings(ctx); } @@ -513,7 +513,8 @@ int main(int argc, char ** argv) { for (int i = 0; i < input_size; i += params.n_batch) { int n_eval = std::min(input_size - i, params.n_batch); - if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, params.n_threads, params.pp_threads)) { + int eval_thr = n_eval > 1 ? params.pp_threads : params.n_threads; + if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, eval_thr)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } @@ -527,7 +528,8 @@ int main(int argc, char ** argv) { if (n_eval > params.n_batch) { n_eval = params.n_batch; } - if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads, params.pp_threads)) { + int eval_thr = n_eval > 1 ? params.pp_threads : params.n_threads; + if (llama_eval(ctx, &embd[i], n_eval, n_past, eval_thr)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } diff --git a/llama.cpp b/llama.cpp index 7c009ddee9b7d..ef2672ae9d510 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1787,7 +1787,6 @@ static struct ggml_cgraph * llama_build_graph( // - n_tokens number of tokens // - n_past: the context size so far // - n_threads: number of threads to use for inference -// - pp_threads: number of threads to use for prompt processing // static bool llama_eval_internal( llama_context & lctx, @@ -1796,7 +1795,6 @@ static bool llama_eval_internal( int n_tokens, int n_past, int n_threads, - int pp_threads, const char * cgraph_fname) { LLAMA_ASSERT((!tokens && embd) || (tokens && !embd)); @@ -1840,8 +1838,7 @@ static bool llama_eval_internal( // for big prompts, if BLAS is enabled, it is better to use only one thread // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance - pp_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : pp_threads; - n_threads = N > 1 ? pp_threads : n_threads; + n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 
1 : n_threads; struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; @@ -3487,7 +3484,7 @@ struct llama_context * llama_new_context_with_model( if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { // Enter a blocking eval loop with dummy input, letting rank=0 drive the process const std::vector tmp(ctx->model.hparams.n_ctx, llama_token_bos()); - while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0, 0)) {}; + while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; llama_backend_free(); exit(1); } @@ -4179,9 +4176,8 @@ int llama_eval( const llama_token * tokens, int n_tokens, int n_past, - int n_threads, - int pp_threads) { - if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, pp_threads, nullptr)) { + int n_threads) { + if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) { LLAMA_LOG_ERROR("%s: failed to eval\n", __func__); return 1; } @@ -4202,9 +4198,8 @@ int llama_eval_embd( const float * embd, int n_tokens, int n_past, - int n_threads, - int pp_threads) { - if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, pp_threads, nullptr)) { + int n_threads) { + if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) { LLAMA_LOG_ERROR("%s: failed to eval\n", __func__); return 1; } @@ -4225,7 +4220,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) { const std::vector tmp(n_batch, llama_token_bos()); - if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, 1, fname)) { + if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) { LLAMA_LOG_ERROR("%s: failed to eval\n", __func__); return 1; } diff --git a/llama.h b/llama.h index addeae45c7c16..9d732f914cbb1 100644 --- a/llama.h +++ b/llama.h @@ -308,8 +308,7 @@ extern "C" { const llama_token * tokens, int n_tokens, int n_past, - int n_threads, - int pp_threads); + int n_threads); // Same as llama_eval, but use float matrix input directly. LLAMA_API int llama_eval_embd( @@ -317,8 +316,7 @@ extern "C" { const float * embd, int n_tokens, int n_past, - int n_threads, - int pp_threads); + int n_threads); // Export a static computation graph for context of 511 and batch size of 1 // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
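
The series ends with `llama_eval()` and `llama_eval_embd()` restored to their original five arguments; the prompt/generation split now lives entirely in the caller, as in the main.cpp hunks above where `eval_thr` selects `pp_threads` for multi-token batches and `n_threads` for single-token generation. A minimal sketch of that final pattern follows, again with a hypothetical helper name and the usual `common.h`/`llama.h` declarations assumed.

```cpp
// Sketch of the caller-side thread selection that patch 14 settles on: batched
// prompt evaluation (n_eval > 1) uses params.pp_threads, single-token generation
// uses params.n_threads. Helper name and locals are illustrative, not from the PR.
#include "common.h"
#include "llama.h"

#include <cstdio>
#include <vector>

static bool eval_chunk(llama_context * ctx, const std::vector<llama_token> & embd,
                       int & n_past, const gpt_params & params) {
    for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
        int n_eval = (int) embd.size() - i;
        if (n_eval > params.n_batch) {
            n_eval = params.n_batch;
        }
        // pp_threads only matters while the batch holds more than one token
        const int eval_thr = n_eval > 1 ? params.pp_threads : params.n_threads;
        if (llama_eval(ctx, &embd[i], n_eval, n_past, eval_thr)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return false;
        }
        n_past += n_eval;
    }
    return true;
}
```
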