Commit 81b979f

[V1] Fix yapf (#11538)

Authored by Woosuk Kwon
Signed-off-by: Woosuk Kwon <[email protected]>
1 parent 371d04d · commit 81b979f

2 files changed: +21 -19 lines

vllm/v1/sample/ops/penalties.py

Lines changed: 13 additions & 11 deletions
@@ -2,8 +2,7 @@
 
 import torch
 
-from vllm.model_executor.layers.utils import (
-    apply_penalties as _apply_penalties)
+from vllm.model_executor.layers.utils import apply_penalties
 from vllm.utils import is_pin_memory_available, make_tensor_with_pad
 
 
@@ -17,27 +16,30 @@ def apply_min_token_penalties(logits: torch.Tensor,
     """
     min_tokens_logits_to_penalize: List[Tuple[int, int]] = []
     for index, min_token in enumerate(min_tokens):
-        if (len(output_token_ids[index]) < min_token):
+        if len(output_token_ids[index]) < min_token:
             for stop_token_id in stop_token_ids[index]:
                 min_tokens_logits_to_penalize.append((index, stop_token_id))
     if min_tokens_logits_to_penalize:
         logits[tuple(zip(*min_tokens_logits_to_penalize))] = -float("inf")
 
 
-def apply_penalties(logits: torch.Tensor, prompt_token_ids: torch.Tensor,
-                    presence_penalties: torch.Tensor,
-                    frequency_penalties: torch.Tensor,
-                    repetition_penalties: torch.Tensor,
-                    output_token_ids: List[List[int]]) -> torch.Tensor:
+def apply_all_penalties(
+    logits: torch.Tensor,
+    prompt_token_ids: torch.Tensor,
+    presence_penalties: torch.Tensor,
+    frequency_penalties: torch.Tensor,
+    repetition_penalties: torch.Tensor,
+    output_token_ids: List[List[int]],
+) -> torch.Tensor:
     """
     Applies presence, frequency and repetition penalties to the logits.
     """
     _, vocab_size = logits.shape
     output_tokens_t = _convert_to_tensors(output_token_ids, vocab_size,
                                           logits.device)
-    return _apply_penalties(logits, prompt_token_ids, output_tokens_t,
-                            presence_penalties, frequency_penalties,
-                            repetition_penalties)
+    return apply_penalties(logits, prompt_token_ids, output_tokens_t,
+                           presence_penalties, frequency_penalties,
+                           repetition_penalties)
 
 
 def _convert_to_tensors(output_token_ids: List[List[int]], vocab_size: int,
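To make the first hunk concrete, here is a small, self-contained sketch of the min-token penalty with toy inputs. The function body is copied from the diff above; the signature annotations and the example call at the end are reconstructions for illustration only and are not part of the commit.

# Sketch only: the body mirrors the diff above; the signature annotations
# and the toy example call are assumptions, not part of the commit.
from typing import List, Set, Tuple

import torch


def apply_min_token_penalties(logits: torch.Tensor,
                              output_token_ids: List[List[int]],
                              stop_token_ids: List[Set[int]],
                              min_tokens: List[int]) -> None:
    """Mask stop-token logits until each request reaches its minimum length."""
    min_tokens_logits_to_penalize: List[Tuple[int, int]] = []
    for index, min_token in enumerate(min_tokens):
        if len(output_token_ids[index]) < min_token:
            for stop_token_id in stop_token_ids[index]:
                min_tokens_logits_to_penalize.append((index, stop_token_id))
    if min_tokens_logits_to_penalize:
        logits[tuple(zip(*min_tokens_logits_to_penalize))] = -float("inf")


# Two requests over a toy 5-token vocabulary; token 4 is the stop token.
logits = torch.zeros(2, 5)
apply_min_token_penalties(
    logits,
    output_token_ids=[[2], [1, 2, 3]],  # request 0 has generated only 1 token
    stop_token_ids=[{4}, {4}],
    min_tokens=[3, 2])
print(logits[0, 4].item(), logits[1, 4].item())  # -inf 0.0

Request 0 is still below its minimum of 3 tokens, so its stop-token logit is masked; request 1 already satisfies its minimum and is left untouched.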

vllm/v1/sample/sampler.py

Lines changed: 8 additions & 8 deletions
@@ -6,8 +6,8 @@
 
 from vllm.v1.outputs import SamplerOutput
 from vllm.v1.sample.metadata import SamplingMetadata
-from vllm.v1.sample.ops.penalties import (apply_min_token_penalties,
-                                          apply_penalties)
+from vllm.v1.sample.ops.penalties import (apply_all_penalties,
+                                          apply_min_token_penalties)
 from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler
 
 _SAMPLING_EPS = 1e-5
@@ -127,10 +127,10 @@ def apply_penalties(
                                   sampling_metadata.min_tokens)
         if not sampling_metadata.no_penalties:
            assert sampling_metadata.prompt_token_ids is not None
-            logits = apply_penalties(logits,
-                                     sampling_metadata.prompt_token_ids,
-                                     sampling_metadata.presence_penalties,
-                                     sampling_metadata.frequency_penalties,
-                                     sampling_metadata.repetition_penalties,
-                                     sampling_metadata.output_token_ids)
+            logits = apply_all_penalties(
+                logits, sampling_metadata.prompt_token_ids,
+                sampling_metadata.presence_penalties,
+                sampling_metadata.frequency_penalties,
+                sampling_metadata.repetition_penalties,
+                sampling_metadata.output_token_ids)
         return logits
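For readers unfamiliar with the wrapper being renamed here: apply_all_penalties pads output_token_ids into a tensor via _convert_to_tensors and then delegates to the shared apply_penalties helper imported from vllm.model_executor.layers.utils. The snippet below is a simplified, single-request sketch of the usual semantics of repetition, frequency and presence penalties; it is not the vLLM implementation, and the helper name penalize is invented for illustration.

# Simplified sketch of conventional penalty semantics (assumption: this
# mirrors the intent of the shared helper, not its actual code).
import torch


def penalize(logits: torch.Tensor, prompt_ids: list, output_ids: list,
             presence: float, frequency: float,
             repetition: float) -> torch.Tensor:
    vocab_size = logits.shape[-1]
    out_counts = torch.bincount(torch.tensor(output_ids, dtype=torch.long),
                                minlength=vocab_size).float()
    seen = torch.zeros(vocab_size, dtype=torch.bool)
    seen[torch.tensor(prompt_ids + output_ids, dtype=torch.long)] = True

    # Repetition penalty shrinks the logits of any token already seen in
    # the prompt or the output (divide if positive, multiply if negative).
    logits = torch.where(seen & (logits > 0), logits / repetition, logits)
    logits = torch.where(seen & (logits <= 0), logits * repetition, logits)
    # Frequency penalty scales with how often a token was generated;
    # presence penalty is a flat cost for having generated it at all.
    logits = logits - frequency * out_counts
    logits = logits - presence * (out_counts > 0).float()
    return logits


print(penalize(torch.ones(6), prompt_ids=[1], output_ids=[2, 2],
               presence=0.5, frequency=0.2, repetition=1.2))
# tensor([ 1.0000,  0.8333, -0.0667,  1.0000,  1.0000,  1.0000])

In the batched code these penalties operate on [num_reqs, vocab_size] tensors, which is why apply_all_penalties first converts the per-request output_token_ids lists into a padded tensor before delegating.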
