From 7fd4320d5005940278fdf9f2193c111ace58801a Mon Sep 17 00:00:00 2001 From: lukasugar <lukasugar@gmail.com> Date: Thu, 12 Jun 2025 19:46:14 +0100 Subject: [PATCH 1/3] Update README.md --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index c4d050d..aed1cfe 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ If you're planning on using any API-based models, make sure you define your rele The images and text are stored on the HuggingFace hub, as a .zip. You may download it directly from there, using `huggingface-cli` (recommended): ```bash -huggingface-cli download answerdotai/ReadBench readbench.zip --repo-type dataset +huggingface-cli download answerdotai/ReadBench readbench.zip --repo-type dataset --local-dir . ``` Alternatively, if you are unable to use `huggingface-cli`, you may use the direct download URL, as provided by HuggingFace: @@ -42,15 +42,16 @@ unzip readbench.zip The authors of GPQA have requested that the dataset should not be reshared as-is, to minimise model contamination. We follow their wishes, which means you need to generate the GPQA images yourself, absed on the original GPQA dataset. You can do so by running the following commands: ```bash -python data_prep.py --datasets gpqa +python datagen.py --datasets gpqa ``` +You might get an error that the dataset is gated and that you need to accept terms on the HF hub. To resolve, just follow the link, accept, and try again. 5. 
**Prepare the benchmark** You may now run the following command to prepare the metadata file which will be used to run the benchmark: ```bash -python downsampler.py --root rendered_images_ft12 --split standard +python data_prep.py --root rendered_images_ft12 --split standard ``` #### tl;dr @@ -58,10 +59,10 @@ python downsampler.py --root rendered_images_ft12 --split standard Running the commands below will download and prepare the full ReadBench benchmark, as used in the paper: ```bash -huggingface-cli download answerdotai/ReadBench readbench.zip --type dataset +huggingface-cli download answerdotai/ReadBench readbench.zip --repo-type dataset --local-dir . unzip readbench.zip -python data_prep.py --datasets gpqa -python downsampler.py --root rendered_images_ft12 --split standard +python datagen.py --datasets gpqa +python data_prep.py --root rendered_images_ft12 --split standard ``` From c3655cd284fcc805bdb791b1889e82eebc44e2a6 Mon Sep 17 00:00:00 2001 From: lukasugar <lukasugar@gmail.com> Date: Thu, 12 Jun 2025 20:17:35 +0100 Subject: [PATCH 2/3] Fix json --- eval_config/dataset2prompt.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eval_config/dataset2prompt.json b/eval_config/dataset2prompt.json index 39073a2..df9f7cb 100644 --- a/eval_config/dataset2prompt.json +++ b/eval_config/dataset2prompt.json @@ -2,5 +2,5 @@ "narrativeqa": "You are given a story, which can be either a novel or a movie script, and a question. Answer the question asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:", "hotpotqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. 
Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:", "2wikimqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:", - "triviaqa": "Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n{input}", + "triviaqa": "Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n{input}" } \ No newline at end of file From 9abdd2071230067cc37e5be62565f7bc80622490 Mon Sep 17 00:00:00 2001 From: lukasugar <lukasugar@gmail.com> Date: Mon, 23 Jun 2025 10:19:17 +0200 Subject: [PATCH 3/3] Update run_eval.py --- run_eval.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/run_eval.py b/run_eval.py index 07d5531..c4df194 100644 --- a/run_eval.py +++ b/run_eval.py @@ -33,11 +33,14 @@ from cosette import Client as CosetteClient from vertexauth import get_claudette_client from openai import AzureOpenAI -azure_endpoint = AzureOpenAI( - azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT"), - api_key=os.getenv("AZURE_OPENAI_API_KEY"), - api_version="2024-12-01-preview", - ) +try: + # Azure secrets should be set if using Azure OpenAI models + azure_endpoint = AzureOpenAI( + azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT"), + api_key=os.getenv("AZURE_OPENAI_API_KEY"), + api_version="2024-12-01-preview", + ) +except Exception as e: pass import babilong_prompts as bp import longbench_prompts as lbp @@ -117,6 +120,10 @@ def question_tag(ds: str, rec: dict) -> str | None: "o3-mini-2025-01-31": {"in": 1.10 / 1_000_000, "out": 4.40 / 1_000_000}, # o1-mini "o1-mini-2024-09-12": 
{"in": 1.10 / 1_000_000, "out": 4.40 / 1_000_000}, + "gemini-2.5-pro-preview-06-05": {"in": 0.15 / 1_000_000, "out": 3.50 / 1_000_000}, + "gemini-2.5-pro-preview": {"in": 0.15 / 1_000_000, "out": 3.50 / 1_000_000}, + "gemini-2.5-flash": {"in": 0.30 / 1_000_000, "out": 2.50 / 1_000_000}, + "gemini-2.5-flash-lite-preview-06-17": {"in": 0.10 / 1_000_000, "out": 0.40 / 1_000_000}, "gemini-2.0-flash": {"in": 0.10 / 1_000_000, "out": 0.40 / 1_000_000}, "gemini-2.5-flash-preview-04-17": {"in": 0.15 / 1_000_000, "out": 0.60 / 1_000_000}, "gemini-2.0-flash-lite": {"in": 0.0075 / 1_000_000, "out": 0.30 / 1_000_000}, @@ -232,7 +239,10 @@ def gemini_model_call( *, debug: bool = False, ) -> Tuple[str, int | None, int | None]: - client = genai.Client(api_key=os.getenv("GEMINI_API_KEY")) + client = genai.Client( + api_key=os.getenv("GEMINI_API_KEY"), + # 5 minutes timeout per call + http_options={"timeout": 5 * 60 * 1000}) parts = [] for seg in segments: