
Commit 4d070d3

chore: fix typo in tests (#5092)

Signed-off-by: Fanrong Li <[email protected]>

1 parent e462677 · commit 4d070d3

6 files changed, +8 -8 lines changed

tests/integration/defs/.test_durations

Lines changed: 1 addition & 1 deletion
@@ -294,7 +294,7 @@
 "test_e2e.py::test_ptp_quickstart_advanced[Llama3.1-8B-FP8-llama-3.1-model/Llama-3.1-8B-Instruct-FP8]": 81.43792725296225,
 "test_e2e.py::test_ptp_quickstart_advanced_eagle3[Llama-3.1-8b-Instruct-llama-3.1-model/Llama-3.1-8B-Instruct-EAGLE3-LLaMA3.1-Instruct-8B]": 109.26379436196294,
 "test_e2e.py::test_ptp_quickstart_advanced_mixed_precision": 80.88908524392173,
-"test_e2e.py::test_ptq_quickstart_advanced_mtp[DeepSeek-V3-Lite-BF16-DeepSeek-V3-Lite/bf16]": 99.42739840806462,
+"test_e2e.py::test_ptp_quickstart_advanced_mtp[DeepSeek-V3-Lite-BF16-DeepSeek-V3-Lite/bf16]": 99.42739840806462,
 "test_unittests.py::test_unittests_v2[unittest/_torch/speculative/test_eagle3.py]": 317.8708840459585,
 "accuracy/test_cli_flow.py::TestLlama7B::test_auto_dtype": 402.75543826818466,
 "examples/test_bert.py::test_llm_bert_general[compare_hf-disable_remove_input_padding-disable_attention_plugin-disable_context_fmha-tp:1-pp:1-float32-BertModel-bert/bert-base-uncased]": 111.17977902293205,

tests/integration/defs/test_e2e.py

Lines changed: 3 additions & 3 deletions
@@ -1601,7 +1601,7 @@ def test_ptp_quickstart_advanced(llm_root, llm_venv, model_name, model_path):
 @pytest.mark.parametrize("model_name,model_path", [
     ("DeepSeek-V3-Lite-BF16", "DeepSeek-V3-Lite/bf16"),
 ])
-def test_ptq_quickstart_advanced_mtp(llm_root, llm_venv, model_name,
+def test_ptp_quickstart_advanced_mtp(llm_root, llm_venv, model_name,
                                      model_path):
     print(f"Testing {model_name}.")
     example_root = Path(os.path.join(llm_root, "examples", "pytorch"))
@@ -1626,7 +1626,7 @@ def test_ptq_quickstart_advanced_mtp(llm_root, llm_venv, model_name,
 
 
 @pytest.mark.skip_less_device(4)
-def test_ptq_quickstart_advanced_bs1(llm_root, llm_venv):
+def test_ptp_quickstart_advanced_bs1(llm_root, llm_venv):
     model_name = "DeepSeek-V3-Lite-FP8"
     model_path = "DeepSeek-V3-Lite/fp8"
     print(f"Testing {model_name}.")
@@ -1653,7 +1653,7 @@ def test_ptq_quickstart_advanced_bs1(llm_root, llm_venv):
 @pytest.mark.parametrize("model_name,model_path", [
     ("Llama-3.1-8B-Instruct", "llama-3.1-model/Llama-3.1-8B-Instruct"),
 ])
-def test_ptq_quickstart_advanced_ngram(llm_root, llm_venv, model_name,
+def test_ptp_quickstart_advanced_ngram(llm_root, llm_venv, model_name,
                                        model_path):
     print(f"Testing {model_name}.")
     example_root = Path(os.path.join(llm_root, "examples", "pytorch"))
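
The bracketed suffix in IDs such as test_ptp_quickstart_advanced_mtp[DeepSeek-V3-Lite-BF16-DeepSeek-V3-Lite/bf16] is generated by pytest from the parametrize values, joined with "-", which is why renaming the function also forces the updates to .test_durations above and to the QA list, test-db YAMLs, and waives.txt below. A self-contained toy sketch of that mechanism (not the real test body, which lives in test_e2e.py):

    import pytest

    @pytest.mark.parametrize("model_name,model_path", [
        ("DeepSeek-V3-Lite-BF16", "DeepSeek-V3-Lite/bf16"),
    ])
    def test_ptp_quickstart_advanced_mtp(model_name, model_path):
        # Placeholder assertion; the real test drives the quickstart example.
        assert model_path.endswith("bf16")

    # Collecting this file yields the node ID referenced by the test lists:
    #   <file>.py::test_ptp_quickstart_advanced_mtp[DeepSeek-V3-Lite-BF16-DeepSeek-V3-Lite/bf16]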

tests/integration/test_lists/qa/examples_test_list.txt

Lines changed: 1 addition & 1 deletion
@@ -508,7 +508,7 @@ test_e2e.py::test_ptp_quickstart_advanced[Llama3.2-11B-BF16-llama-3.2-models/Lla
 test_e2e.py::test_ptp_quickstart_advanced[Nemotron4_4B-BF16-nemotron/Minitron-4B-Base]
 test_e2e.py::test_ptp_quickstart_advanced[Nemotron-H-8B-Nemotron-H-8B-Base-8K]
 test_e2e.py::test_ptp_quickstart_advanced[Qwen3-30B-A3B-Qwen3/Qwen3-30B-A3B]
-test_e2e.py::test_ptq_quickstart_advanced_ngram[Llama-3.1-8B-Instruct-llama-3.1-model/Llama-3.1-8B-Instruct]
+test_e2e.py::test_ptp_quickstart_advanced_ngram[Llama-3.1-8B-Instruct-llama-3.1-model/Llama-3.1-8B-Instruct]
 test_e2e.py::test_ptp_quickstart_advanced_8gpus[Llama3.1-70B-BF16-llama-3.1-model/Meta-Llama-3.1-70B]
 test_e2e.py::test_ptp_quickstart_advanced_8gpus[Llama3.1-70B-FP8-llama-3.1-model/Llama-3.1-70B-Instruct-FP8]
 test_e2e.py::test_ptp_quickstart_advanced_8gpus[Llama3.1-405B-FP8-llama-3.1-model/Llama-3.1-405B-Instruct-FP8]

tests/integration/test_lists/test-db/l0_b200.yml

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ l0_b200:
 - accuracy/test_llm_api_pytorch.py::TestQwen3_30B_A3B::test_nvfp4[latency_moe_trtllm]
 - test_e2e.py::test_ptp_quickstart_advanced[Llama3.1-8B-NVFP4-nvfp4-quantized/Meta-Llama-3.1-8B]
 - test_e2e.py::test_ptp_quickstart_advanced[Llama3.1-8B-FP8-llama-3.1-model/Llama-3.1-8B-Instruct-FP8]
-- test_e2e.py::test_ptq_quickstart_advanced_mtp[DeepSeek-V3-Lite-BF16-DeepSeek-V3-Lite/bf16]
+- test_e2e.py::test_ptp_quickstart_advanced_mtp[DeepSeek-V3-Lite-BF16-DeepSeek-V3-Lite/bf16]
 - test_e2e.py::test_ptp_quickstart_advanced_mixed_precision
 - test_e2e.py::test_ptp_quickstart_advanced_eagle3[Llama-3.1-8b-Instruct-llama-3.1-model/Llama-3.1-8B-Instruct-EAGLE3-LLaMA3.1-Instruct-8B]
 - test_e2e.py::test_trtllm_bench_pytorch_backend_sanity[meta-llama/Llama-3.1-8B-llama-3.1-8b-False-False]

tests/integration/test_lists/test-db/l0_dgx_h100.yml

Lines changed: 1 addition & 1 deletion
@@ -38,7 +38,7 @@ l0_dgx_h100:
 - disaggregated/test_disaggregated.py::test_disaggregated_overlap[TinyLlama-1.1B-Chat-v1.0]
 - accuracy/test_disaggregated_serving.py::TestLlama3_1_8BInstruct::test_auto_dtype[False]
 - accuracy/test_disaggregated_serving.py::TestLlama3_1_8BInstruct::test_auto_dtype[True]
-- test_e2e.py::test_ptq_quickstart_advanced_bs1
+- test_e2e.py::test_ptp_quickstart_advanced_bs1
 - condition:
     ranges:
       system_gpu_count:
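
The test-db YAML files (l0_b200.yml, l0_dgx_h100.yml) also reference tests by their full pytest node IDs, nested under per-platform conditions. A structure-agnostic sketch for locating such references locally, assuming PyYAML is installed and using the file path from this diff; the recursive walk is used because the full test-db schema is not shown here:

    import yaml

    def strings(node):
        # Yield every string anywhere inside the parsed YAML document.
        if isinstance(node, dict):
            for value in node.values():
                yield from strings(value)
        elif isinstance(node, list):
            for item in node:
                yield from strings(item)
        elif isinstance(node, str):
            yield node

    with open("tests/integration/test_lists/test-db/l0_dgx_h100.yml") as f:
        db = yaml.safe_load(f)

    print([s for s in strings(db) if "quickstart_advanced_bs1" in s])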

tests/integration/test_lists/waives.txt

Lines changed: 1 addition & 1 deletion
@@ -423,7 +423,7 @@ accuracy/test_disaggregated_serving.py::TestLlama3_1_8BInstruct::test_auto_dtype
 accuracy/test_disaggregated_serving.py::TestLlama3_1_8BInstruct::test_auto_dtype[True] SKIP (https://nvbugs/5303573)
 test_e2e.py::test_openai_multi_chat_example SKIP (https://nvbugs/5236980)
 test_e2e.py::test_ptp_quickstart_advanced[Llama3.1-70B-FP8-llama-3.1-model/Llama-3.1-70B-Instruct-FP8] SKIP (https://nvbugs/5318059)
-test_e2e.py::test_ptq_quickstart_advanced_ngram[Llama-3.1-8B-Instruct-llama-3.1-model/Llama-3.1-8B-Instruct] SKIP (https://nvbugspro.nvidia.com/bug/5324239)
+test_e2e.py::test_ptp_quickstart_advanced_ngram[Llama-3.1-8B-Instruct-llama-3.1-model/Llama-3.1-8B-Instruct] SKIP (https://nvbugspro.nvidia.com/bug/5324239)
 unittest/_torch/auto_deploy/integration/test_ad_build.py SKIP (https://nvbugs/5318103)
 accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_bfloat16_4gpus[tp2pp2-attn_backend=TRTLLM-torch_compile=False] SKIP (https://nvbugs/5318143)
 accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_bfloat16_4gpus[tp2pp2-attn_backend=TRTLLM-torch_compile=True] SKIP (https://nvbugs/5318143)
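
waives.txt pairs each node ID with a SKIP marker and a bug link, so a stale name there would quietly stop waiving the intended test. A quick local check that none of the tracked lists still carries the misspelled test_ptq_ prefix, assuming the repository root as the working directory; this script is illustrative and not part of the commit:

    from pathlib import Path

    LIST_FILES = [
        "tests/integration/defs/.test_durations",
        "tests/integration/test_lists/qa/examples_test_list.txt",
        "tests/integration/test_lists/test-db/l0_b200.yml",
        "tests/integration/test_lists/test-db/l0_dgx_h100.yml",
        "tests/integration/test_lists/waives.txt",
    ]

    for path in LIST_FILES:
        stale = [line.strip() for line in Path(path).read_text().splitlines()
                 if "test_ptq_quickstart" in line]
        print(path, "->", stale or "clean")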
