Skip to content

Commit

Permalink
add SupportsQuant to molmo
Browse files Browse the repository at this point in the history
Signed-off-by: Kyle Sayers <[email protected]>
  • Loading branch information
kylesayrs committed Feb 15, 2025
1 parent 345cba7 commit 64ee7d0
Showing 1 changed file with 7 additions and 5 deletions.
12 changes: 7 additions & 5 deletions vllm/model_executor/models/molmo.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,8 @@
from vllm.sequence import IntermediateTensors
from vllm.utils import JSONTree, json_map_leaves

from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP
from .interfaces import (SupportsLoRA, SupportsMultiModal, SupportsPP,
SupportsQuant)
from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory, make_layers,
Expand Down Expand Up @@ -633,7 +634,8 @@ def forward(
return hidden_states, residual


class MolmoVisionBackbone(nn.Module):
class MolmoVisionBackbone(nn.Module, SupportsQuant):
packed_modules_mapping = {"merged_linear": ["gate_proj", "up_proj"]}

def __init__(
self,
Expand Down Expand Up @@ -794,7 +796,7 @@ def load_weights(self, weights: Iterable[Tuple[str,


@support_torch_compile
class MolmoModel(nn.Module):
class MolmoModel(nn.Module, SupportsQuant):

def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
super().__init__()
Expand Down Expand Up @@ -1402,8 +1404,8 @@ def get_replacement_molmo(item_idx: int):
@MULTIMODAL_REGISTRY.register_processor(MolmoMultiModalProcessor,
info=MolmoProcessingInfo,
dummy_inputs=MolmoDummyInputsBuilder)
class MolmoForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
SupportsLoRA):
class MolmoForCausalLM(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA,
SupportsQuant):
hf_to_vllm_mapper = WeightsMapper(
orig_to_new_substr={
# vision backbone mapping
Expand Down

0 comments on commit 64ee7d0

Please sign in to comment.