Commit

rename adapter_types -> adapter_methods

calpt committed Feb 9, 2025
1 parent 788bc8d commit 3d085fd
Showing 4 changed files with 15 additions and 7 deletions.
12 changes: 10 additions & 2 deletions src/adapters/interface.py
```diff
@@ -12,6 +12,14 @@
 class AdapterMethod:
     """
     Enum of all supported adapter method types.
+
+    Attributes:
+        bottleneck: Adapter methods using bottleneck layers.
+        prefix_tuning: Adapter methods based on Prefix Tuning.
+        lora: Adapter methods based on low-rank adaptation.
+        prompt_tuning: Adapter methods based on Prompt Tuning.
+        reft: Adapter methods based on Representation Fine-Tuning.
+        invertible: Adapter methods using invertible modules.
     """

     bottleneck = "bottleneck"
```
```diff
@@ -51,7 +59,7 @@ class AdapterModelInterface:
     This interface translates generic accessor names to model-specific attribute names.

     Args:
-        adapter_types (List[str]): List of adapter types that are supported by the model.
+        adapter_methods (List[str]): List of adapter methods that are supported by the model.
         model_embeddings (str): Name of the model's embedding layer.
         model_layers (str): Name of the model's layer list.
         layer_self_attn (str): Name of the self-attention layer in a transformer layer.
@@ -69,7 +77,7 @@ class AdapterModelInterface:
         layer_ln_2 (Optional[str]): Layer norm *after* the feed forward layer. Used for extended bottleneck adapter support.
     """

-    adapter_types: List[str]
+    adapter_methods: List[str]

     model_embeddings: str
     model_layers: str
```
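Taken together, the interface.py changes amount to renaming a single dataclass field. A minimal sketch of the resulting shape, abridged to the fields visible in this diff (assumption: the real class declares further `layer_*` attributes that are collapsed in this view):

```python
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class AdapterModelInterface:
    """Translates generic accessor names to model-specific attribute names (abridged)."""

    adapter_methods: List[str]  # renamed from `adapter_types` in this commit

    model_embeddings: str
    model_layers: str
    layer_self_attn: str
    # further layer_* fields elided here; see the full file
    layer_ln_2: Optional[str] = None
```

Because it is a plain dataclass, the rename changes both the attribute name and the constructor keyword, which is why the call sites in the files below change as well.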
4 changes: 2 additions & 2 deletions src/adapters/model_mixin.py
```diff
@@ -456,7 +456,7 @@ def init_adapters(self, model_config, adapters_config):

         # Initialize adapter types defined in interface
         if getattr(self.base_model, "adapter_interface", None) is not None:
-            for adapter_type in self.base_model.adapter_interface.adapter_types:
+            for adapter_type in self.base_model.adapter_interface.adapter_methods:
                 init_func = METHOD_INIT_MAPPING[adapter_type]
                 init_func(self)
         else:
@@ -499,7 +499,7 @@ def supports_adapter(self, type_or_config: Union[str, AdapterConfig]) -> bool:
         supported = []
         for _type in types:
             if getattr(self.base_model, "adapter_interface", None) is not None:
-                supported.append(_type in self.base_model.adapter_interface.adapter_types)
+                supported.append(_type in self.base_model.adapter_interface.adapter_methods)
             elif _type == AdapterMethod.prompt_tuning:
                 supported.append(self.base_model.support_prompt_tuning)
             elif _type == AdapterMethod.invertible:
```
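Both hunks consume the renamed field the same way: the interface's `adapter_methods` list is the single source of truth for which methods get initialized and which are reported as supported. A self-contained sketch of that dispatch pattern, with hypothetical print-only stand-ins for the real entries in `METHOD_INIT_MAPPING`:

```python
from typing import Callable, Dict, List


def _init_bottleneck(model) -> None:
    # Hypothetical stand-in for the real bottleneck init function.
    print("bottleneck support initialized")


def _init_lora(model) -> None:
    # Hypothetical stand-in for the real LoRA init function.
    print("lora support initialized")


METHOD_INIT_MAPPING: Dict[str, Callable] = {
    "bottleneck": _init_bottleneck,
    "lora": _init_lora,
}


def init_adapters(model, adapter_methods: List[str]) -> None:
    # Mirrors the loop above: one init call per method declared in the interface.
    for adapter_type in adapter_methods:
        METHOD_INIT_MAPPING[adapter_type](model)


def supports_adapter(adapter_methods: List[str], _type: str) -> bool:
    # Mirrors the membership check in supports_adapter().
    return _type in adapter_methods


methods = ["bottleneck", "lora"]
init_adapters(object(), methods)
print(supports_adapter(methods, "lora"))  # True
print(supports_adapter(methods, "reft"))  # False
```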
2 changes: 1 addition & 1 deletion tests/test_methods/test_on_custom_interface.py
```diff
@@ -27,7 +27,7 @@ class CustomInterfaceModelTestBase(TextAdapterTestBase):
     )
     tokenizer_name = "yujiepan/gemma-2-tiny-random"
     adapter_interface = AdapterModelInterface(
-        adapter_types=["bottleneck", "lora", "reft", "invertible"],
+        adapter_methods=["bottleneck", "lora", "reft", "invertible"],
         model_embeddings="embed_tokens",
         model_layers="layers",
         layer_self_attn="self_attn",
```
4 changes: 2 additions & 2 deletions tests/test_misc/test_custom_interface_compat.py
```diff
@@ -34,7 +34,7 @@ class CustomInterfaceCompatTest(unittest.TestCase):
         pad_token_id=0,
     )
     llama_interface = AdapterModelInterface(
-        adapter_types=["bottleneck", "lora", "reft", "invertible"],
+        adapter_methods=["bottleneck", "lora", "reft", "invertible"],
         model_embeddings="embed_tokens",
         model_layers="layers",
         layer_self_attn="self_attn",
@@ -52,7 +52,7 @@ class CustomInterfaceCompatTest(unittest.TestCase):
         layer_ln_2=None,
     )
     bert_interface = AdapterModelInterface(
-        adapter_types=["bottleneck", "lora", "reft", "prompt_tuning", "invertible"],
+        adapter_methods=["bottleneck", "lora", "reft", "prompt_tuning", "invertible"],
         model_embeddings="embeddings",
         model_layers="encoder.layer",
         layer_self_attn="attention",
```
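For downstream code, the practical effect of the rename is a breaking keyword change: any interface built with `adapter_types=` must switch to `adapter_methods=`. A standalone illustration using a hypothetical three-field stand-in for the real dataclass (which requires many more fields):

```python
from dataclasses import dataclass
from typing import List


@dataclass
class AdapterModelInterface:
    # Hypothetical, trimmed stand-in for adapters.AdapterModelInterface.
    adapter_methods: List[str]
    model_embeddings: str
    model_layers: str


# New spelling, matching the updated tests above:
iface = AdapterModelInterface(
    adapter_methods=["bottleneck", "lora", "reft", "invertible"],
    model_embeddings="embed_tokens",
    model_layers="layers",
)

# Old spelling now fails, because dataclass keywords follow field names:
try:
    AdapterModelInterface(
        adapter_types=["lora"],
        model_embeddings="embed_tokens",
        model_layers="layers",
    )
except TypeError as exc:
    print(exc)  # ... got an unexpected keyword argument 'adapter_types'
```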
