diff --git a/src/adapters/interface.py b/src/adapters/interface.py
index e16f2df29..8152e3afc 100644
--- a/src/adapters/interface.py
+++ b/src/adapters/interface.py
@@ -12,6 +12,14 @@ class AdapterMethod:
     """
     Enum of all supported adapter method types.
+
+    Attributes:
+        bottleneck: Adapter methods using bottleneck layers.
+        prefix_tuning: Adapter methods based on Prefix Tuning.
+        lora: Adapter methods based on low-rank adaptation.
+        prompt_tuning: Adapter methods based on Prompt Tuning.
+        reft: Adapter methods based on Representation Fine-Tuning.
+        invertible: Adapter methods using invertible modules.
     """
 
     bottleneck = "bottleneck"
@@ -51,7 +59,7 @@ class AdapterModelInterface:
     This interface translates generic accessor names to model-specific attribute names.
 
     Args:
-        adapter_types (List[str]): List of adapter types that are supported by the model.
+        adapter_methods (List[str]): List of adapter types that are supported by the model.
         model_embeddings (str): Name of the model's embedding layer.
         model_layers (str): Name of the model's layer list.
         layer_self_attn (str): Name of the self-attention layer in a transformer layer.
@@ -69,7 +77,7 @@ class AdapterModelInterface:
         layer_ln_2 (Optional[str]): Layer norm *after* the feed forward layer. Used for extended bottleneck adapter support.
     """
 
-    adapter_types: List[str]
+    adapter_methods: List[str]
     model_embeddings: str
     model_layers: str
diff --git a/src/adapters/model_mixin.py b/src/adapters/model_mixin.py
index 8e4f4dee5..2327ace00 100644
--- a/src/adapters/model_mixin.py
+++ b/src/adapters/model_mixin.py
@@ -456,7 +456,7 @@ def init_adapters(self, model_config, adapters_config):
 
         # Initialize adapter types defined in interface
         if getattr(self.base_model, "adapter_interface", None) is not None:
-            for adapter_type in self.base_model.adapter_interface.adapter_types:
+            for adapter_type in self.base_model.adapter_interface.adapter_methods:
                 init_func = METHOD_INIT_MAPPING[adapter_type]
                 init_func(self)
         else:
@@ -499,7 +499,7 @@ def supports_adapter(self, type_or_config: Union[str, AdapterConfig]) -> bool:
         supported = []
         for _type in types:
             if getattr(self.base_model, "adapter_interface", None) is not None:
-                supported.append(_type in self.base_model.adapter_interface.adapter_types)
+                supported.append(_type in self.base_model.adapter_interface.adapter_methods)
             elif _type == AdapterMethod.prompt_tuning:
                 supported.append(self.base_model.support_prompt_tuning)
             elif _type == AdapterMethod.invertible:
diff --git a/tests/test_methods/test_on_custom_interface.py b/tests/test_methods/test_on_custom_interface.py
index d8a681e5f..67bccb606 100644
--- a/tests/test_methods/test_on_custom_interface.py
+++ b/tests/test_methods/test_on_custom_interface.py
@@ -27,7 +27,7 @@ class CustomInterfaceModelTestBase(TextAdapterTestBase):
     )
     tokenizer_name = "yujiepan/gemma-2-tiny-random"
     adapter_interface = AdapterModelInterface(
-        adapter_types=["bottleneck", "lora", "reft", "invertible"],
+        adapter_methods=["bottleneck", "lora", "reft", "invertible"],
         model_embeddings="embed_tokens",
         model_layers="layers",
         layer_self_attn="self_attn",
diff --git a/tests/test_misc/test_custom_interface_compat.py b/tests/test_misc/test_custom_interface_compat.py
index 079c4590b..1e19aade2 100644
--- a/tests/test_misc/test_custom_interface_compat.py
+++ b/tests/test_misc/test_custom_interface_compat.py
@@ -34,7 +34,7 @@ class CustomInterfaceCompatTest(unittest.TestCase):
         pad_token_id=0,
     )
     llama_interface = AdapterModelInterface(
-        adapter_types=["bottleneck", "lora", "reft", "invertible"],
+        adapter_methods=["bottleneck", "lora", "reft", "invertible"],
         model_embeddings="embed_tokens",
         model_layers="layers",
         layer_self_attn="self_attn",
@@ -52,7 +52,7 @@ class CustomInterfaceCompatTest(unittest.TestCase):
         layer_ln_2=None,
     )
     bert_interface = AdapterModelInterface(
-        adapter_types=["bottleneck", "lora", "reft", "prompt_tuning", "invertible"],
+        adapter_methods=["bottleneck", "lora", "reft", "prompt_tuning", "invertible"],
         model_embeddings="embeddings",
         model_layers="encoder.layer",
         layer_self_attn="attention",