graph : fix equal_seq() check #14986

Merged: 1 commit, Aug 1, 2025

11 changes: 10 additions & 1 deletion src/llama-context.cpp
@@ -113,6 +113,15 @@ llama_context::llama_context(
         }
     }
 
+    {
+        const char * LLAMA_GRAPH_REUSE_DISABLE = getenv("LLAMA_GRAPH_REUSE_DISABLE");
+        graph_reuse_disable = LLAMA_GRAPH_REUSE_DISABLE ? (atoi(LLAMA_GRAPH_REUSE_DISABLE) != 0) : graph_reuse_disable;
+
+        if (graph_reuse_disable) {
+            LLAMA_LOG_WARN("%s: graph reuse disabled\n", __func__);
+        }
+    }
+
     const uint32_t n_ctx_per_seq = cparams.n_ctx / cparams.n_seq_max;
 
     LLAMA_LOG_INFO("%s: n_seq_max = %u\n", __func__, cparams.n_seq_max);
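
For reference, the toggle added above follows a common pattern: read the environment variable once at context construction and override the default only when it is set. A minimal standalone sketch (the variable name comes from the PR; the surrounding program is illustrative only):

```cpp
// Standalone sketch of the env-var toggle added in this PR; the variable
// name matches the diff above, everything else is illustrative.
#include <cstdio>
#include <cstdlib>

int main() {
    bool graph_reuse_disable = false; // default: graph reuse stays enabled

    // any non-zero integer value disables reuse; an unset variable
    // leaves the default in place
    const char * env = getenv("LLAMA_GRAPH_REUSE_DISABLE");
    graph_reuse_disable = env ? (atoi(env) != 0) : graph_reuse_disable;

    printf("graph reuse %s\n", graph_reuse_disable ? "disabled" : "enabled");
    return 0;
}
```
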
@@ -716,7 +725,7 @@ llm_graph_result * llama_context::process_ubatch(const llama_ubatch & ubatch, ll
     // in order to correctly reuse a graph, it's full topology has to be uniquely determined by these parameters
     const auto gparams = graph_params(res, ubatch, mctx, gtype);
 
-    if (res->can_reuse(gparams)) {
+    if (!graph_reuse_disable && res->can_reuse(gparams)) {
         //LLAMA_LOG_DEBUG("%s: reusing previous graph\n", __func__);
 
         n_reused++;
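
With the kill switch checked before can_reuse(), setting the variable skips the reuse comparison entirely and forces a fresh graph for every ubatch, e.g. `LLAMA_GRAPH_REUSE_DISABLE=1 ./llama-cli -m model.gguf` (invocation illustrative). This gives a quick way to verify whether a suspected bug disappears once graph reuse is off.
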
3 changes: 3 additions & 0 deletions src/llama-context.h
@@ -291,6 +291,9 @@ struct llama_context {
     // ref: https://github.com/ggml-org/llama.cpp/pull/14285
     bool supports_set_rows = false;
 
+    // env: LLAMA_GRAPH_REUSE_DISABLE
+    bool graph_reuse_disable = false;
+
     // perf
     mutable int64_t t_start_us = 0;
     mutable int64_t t_load_us = 0;
4 changes: 3 additions & 1 deletion src/llama-graph.h
@@ -423,7 +423,9 @@ struct llm_graph_params {
             (!ubatch.embd && !other.ubatch.embd)
         );
 
-        if (can_reuse_ubatch && !ubatch.equal_seqs()) {
+        // when we split the batch using "equal_seqs" we have to verify that the participating sequences are the same
+        // the reason is because the set of attention streams would be different for different sequences
+        if (can_reuse_ubatch && ubatch.equal_seqs()) {
             if (!ubatch.data) {
                 // if the old ubatch does not own it's data, then we cannot guarantee that it is still alive, and
                 // therefore we cannot perform the sequence id check. normally should never happen
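
Note the fix itself: the previous condition was inverted (`!ubatch.equal_seqs()`), so the sequence check ran only for batches that were not split with "equal_seqs", exactly the case where it is not needed. The corrected condition runs it for equal_seqs batches. The comparison body is elided above; conceptually it must establish that both ubatches cover the same sequences, since each sequence maps to its own attention stream. A hypothetical sketch of such a check (only `llama_seq_id` and `equal_seqs()` come from the codebase; the helper is illustrative, not the PR's code):

```cpp
// Hypothetical sketch of the per-sequence identity check guarded by the
// corrected equal_seqs() condition; not the actual PR implementation.
#include <cstdint>
#include <vector>

using llama_seq_id = int32_t; // matches the typedef used by llama.cpp

// reuse is only safe when both ubatches cover the same sequences in the
// same order, because each sequence owns a distinct attention stream
static bool same_seq_set(const std::vector<llama_seq_id> & cur,
                         const std::vector<llama_seq_id> & prev) {
    if (cur.size() != prev.size()) {
        return false;
    }
    for (size_t i = 0; i < cur.size(); ++i) {
        if (cur[i] != prev[i]) {
            return false; // different sequence -> different attention stream
        }
    }
    return true;
}
```
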