From a00646e3e992f9953189141250895e3d2d4f8c86 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius
Date: Mon, 20 Jan 2025 07:22:17 +0100
Subject: [PATCH] llama : add stdexcept header for std::runtime_error
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This commit adds the stdexcept header to llama-arch.cpp to allow the use
of std::runtime_error.

Currently, the following compilation error is generated:
```console
/work/ai/new-vision-api/src/llama-arch.cpp: In member function ‘std::string BASE_TN_IMPL<Tname, Ttensor>::str() const [with Tname = llm_arch; Ttensor = llm_tensor; std::string = std::__cxx11::basic_string<char>]’:
/home/danbev/work/ai/new-vision-api/src/llama-arch.cpp:1544:20: error: ‘runtime_error’ is not a member of ‘std’
 1544 |         throw std::runtime_error(format("Cannot find tensor name mapping for arch %d", arch));
      |                    ^~~~~~~~~~~~~
```
---
 src/llama-arch.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index e2908c0ae0956..1f02304a4a596 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -4,6 +4,7 @@
 #include
 #include <map>
+#include <stdexcept>
 
 static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_LLAMA, "llama" },