We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent fb2c5f7 · commit 556c7ed (Copy full SHA for 556c7ed)
llama_cpp/llama.py
@@ -811,9 +811,16 @@ def _create_completion(
811
if self.verbose:
812
llama_cpp.llama_reset_timings(self.ctx)
813
814
- if len(prompt_tokens) + max_tokens > self._n_ctx:
+ if len(prompt_tokens) > self._n_ctx:
815
raise ValueError(f"Requested tokens exceed context window of {self._n_ctx}")
816
817
+ # Truncate max_tokens if requested tokens would exceed the context window
818
+ max_tokens = (
819
+ max_tokens
820
+ if max_tokens + len(prompt_tokens) < self._n_ctx
821
+ else (self._n_ctx - len(prompt_tokens))
822
+ )
823
+
824
if stop != []:
825
stop_sequences = [s.encode("utf-8") for s in stop]
826
else:
0 commit comments