Commit 210e6e5

llama : remove obsolete map for layer counting
1 parent 79ad734 commit 210e6e5

File tree

1 file changed (+1, -4 lines)

llama.cpp

Lines changed: 1 addition & 4 deletions
@@ -5469,9 +5469,6 @@ static struct ggml_cgraph * llama_build_graph(
     // check if we should build the worst-case graph (for memory measurement)
     const bool worst_case = ggml_allocr_is_measure(lctx.alloc);
 
-    // count the number of times a tensor with a given name has been offloaded
-    std::unordered_map<std::string, int> offload_n;
-
     // keep track of the input that has already been allocated
     bool alloc_inp_tokens = false;
     bool alloc_inp_embd   = false;
@@ -5654,7 +5651,7 @@ static struct ggml_cgraph * llama_build_graph(
                 break;
             case OFFLOAD_FUNC:
                 if (n_gpu_layers < n_layer) {
-                    if (offload_n[name]++ < i_gpu_start) {
+                    if (il < i_gpu_start) {
                         func_e = OFFLOAD_FUNC_NOP;
                     }
                 }
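
For reference, after this change the offload decision depends only on the current layer index il, so the per-tensor-name counter map (offload_n) is no longer needed. Below is a minimal standalone sketch of that decision; the variable values and the i_gpu_start formula are illustrative assumptions, not taken verbatim from llama_build_graph:

// Standalone sketch of the post-commit offload decision (assumed values;
// not the actual llama.cpp graph-building code).
#include <cstdio>

enum offload_func_e { OFFLOAD_FUNC_NOP, OFFLOAD_FUNC };

int main() {
    const int n_layer      = 32;                      // total transformer layers (assumed)
    const int n_gpu_layers = 20;                      // layers requested on the GPU (assumed)
    const int i_gpu_start  = n_layer - n_gpu_layers;  // assumed split point between CPU and GPU layers

    for (int il = 0; il < n_layer; ++il) {
        offload_func_e func_e = OFFLOAD_FUNC;

        // After this commit: the layer index decides the offload directly,
        // replacing the old offload_n[name]++ counting map keyed by tensor name.
        if (n_gpu_layers < n_layer) {
            if (il < i_gpu_start) {
                func_e = OFFLOAD_FUNC_NOP;
            }
        }

        printf("layer %2d -> %s\n", il, func_e == OFFLOAD_FUNC ? "offloaded" : "kept on CPU");
    }

    return 0;
}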
