Skip to content

Commit 8f53f32

Browse files
committed
Remove vocab_only from constructor of llama_model_loader
1 parent 2ba3f4b commit 8f53f32

File tree

1 file changed

+4
-5
lines changed

1 file changed

+4
-5
lines changed

llama.cpp

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -571,7 +571,7 @@ struct llama_model_loader {
571571
struct ggml_context * ggml_ctx = NULL;
572572
std::unique_ptr<llama_mmap> mapping;
573573

574-
llama_model_loader(const std::string & fname_base, bool use_mmap, bool vocab_only) {
574+
llama_model_loader(const std::string & fname_base, bool use_mmap) {
575575
file_loader = std::unique_ptr<llama_file_loader>(new llama_file_loader(fname_base.c_str(), tensors_map));
576576
if (!llama_mmap::SUPPORTED) {
577577
use_mmap = false;
@@ -916,7 +916,7 @@ static void llama_model_load_internal(
916916

917917
model.t_start_us = ggml_time_us();
918918

919-
std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap, vocab_only));
919+
std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));
920920

921921
vocab = std::move(ml->file_loader->vocab);
922922
model.hparams = ml->file_loader->hparams;
@@ -2299,8 +2299,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
22992299
nthread = std::thread::hardware_concurrency();
23002300
}
23012301

2302-
std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false,
2303-
/*vocab_only*/ false));
2302+
std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false));
23042303
llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype);
23052304

23062305
#ifdef GGML_USE_K_QUANTS
@@ -2733,7 +2732,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
27332732
llama_buffer base_buf;
27342733
if (path_base_model) {
27352734
fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
2736-
model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*vocab_only*/ false));
2735+
model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
27372736

27382737
size_t ctx_size;
27392738
size_t mmapped_size;

0 commit comments

Comments (0)