Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 099119f

Browse files
committed Aug 7, 2023
Fixes to rebase
1 parent f6d5fe3 commit 099119f

File tree

2 files changed

+3
-8
lines changed

2 files changed

+3
-8
lines changed
 

‎convert.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1124,13 +1124,8 @@ def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
11241124
params.n_mult,
11251125
params.n_head,
11261126
params.n_layer,
1127-
<<<<<<< HEAD
1128-
params.n_embd // params.n_head, # rot (obsolete)
1129-
file_type.value,
1130-
=======
11311127
params.n_vocab_base | 0xF0000000, # reuse obsolete rot value to store vocab_base
1132-
params.file_type.value,
1133-
>>>>>>> bfccc62 (Use some tricks to eliminate the necessity for a new format)
1128+
file_type.value,
11341129
]
11351130
self.fout.write(struct.pack("I" * len(values), *values))
11361131

‎llama.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -555,7 +555,7 @@ struct llama_file_loader {
555555
// LLaMAv2
556556
// TODO: read from header
557557
hparams.n_head_kv = hparams.n_head;
558-
=======
558+
}
559559
void read_vocab() {
560560
vocab.id_to_token.resize(hparams.n_vocab);
561561

@@ -1442,7 +1442,7 @@ static struct ggml_cgraph * llama_build_graph(
14421442
const int64_t n_embd_head = hparams.n_embd_head();
14431443
const int64_t n_embd_gqa = hparams.n_embd_gqa();
14441444

1445-
LLAMA_ASSERT(n_embd_head == hparams.n_rot);
1445+
LLAMA_ASSERT(n_embd_head == hparams.n_embd/hparams.n_head);
14461446

14471447
const float freq_base = hparams.rope_freq_base;
14481448
const float freq_scale = hparams.rope_freq_scale;

0 commit comments

Comments (0)
Please sign in to comment.