Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,6 @@ follow_imports = "silent"
# move the directory here and remove it from tools/mypy.sh
files = [
"vllm/*.py",
"vllm/adapter_commons",
"vllm/assets",
"vllm/entrypoints",
"vllm/core",
Expand Down
Empty file removed vllm/adapter_commons/__init__.py
Empty file.
16 changes: 0 additions & 16 deletions vllm/adapter_commons/layers.py

This file was deleted.

106 changes: 0 additions & 106 deletions vllm/adapter_commons/models.py

This file was deleted.

26 changes: 0 additions & 26 deletions vllm/adapter_commons/request.py

This file was deleted.

93 changes: 0 additions & 93 deletions vllm/adapter_commons/utils.py

This file was deleted.

39 changes: 0 additions & 39 deletions vllm/adapter_commons/worker_manager.py

This file was deleted.

11 changes: 8 additions & 3 deletions vllm/lora/layers/utils.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,22 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from dataclasses import dataclass

import torch
import torch.nn as nn

from vllm.adapter_commons.layers import AdapterMapping


@dataclass
class LoRAMapping:
    """Per-batch mapping of tokens and prompts to LoRA adapter slots.

    Both mapping attributes are normalized to immutable tuples after
    construction, so callers may pass any sequence of ints.
    """

    # One adapter index per input token in the batch.
    index_mapping: tuple[int, ...]
    # One adapter index per prompt in the batch.
    prompt_mapping: tuple[int, ...]
    # True when this mapping describes a prefill (prompt-processing) step.
    is_prefill: bool = False

    def __post_init__(self):
        # Freeze both mappings into tuples so the instance is effectively
        # immutable regardless of the sequence type the caller supplied.
        for field_name in ("index_mapping", "prompt_mapping"):
            setattr(self, field_name, tuple(getattr(self, field_name)))


def _get_lora_device(base_layer: nn.Module) -> torch.device:
# code borrowed from https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/vllm/lora/layers.py#L34
Expand Down
Loading