Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add model architecture parametrization #31

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -30,10 +30,11 @@ dataloader:
batch_size: 64

model:
architecture: BiLSTM
embedding:
embedding_dim: 128
rnn:
rnn_unit: nn.LSTM
rnn_unit: LSTM # GRU, RNN
hidden_size: 256
num_layers: 1
dropout: 0
Expand Down
18 changes: 11 additions & 7 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from torch.utils.data import DataLoader

from pytorch_ner.dataset import NERCollator, NERDataset
from pytorch_ner.nn_modules.architecture import BiLSTM
from pytorch_ner.nn_modules.embedding import Embedding
from pytorch_ner.nn_modules.linear import LinearHead
from pytorch_ner.nn_modules.rnn import DynamicRNN
Expand All @@ -19,6 +18,7 @@
)
from pytorch_ner.save import save_model
from pytorch_ner.train import train
from pytorch_ner.utils import str_to_class


def main(path_to_config: str):
Expand Down Expand Up @@ -148,10 +148,11 @@ def main(path_to_config: str):
)

rnn_layer = DynamicRNN(
rnn_unit=eval(config["model"]["rnn"]["rnn_unit"]), # TODO: fix eval
input_size=config["model"]["embedding"][
"embedding_dim"
], # reference to embedding_dim
rnn_unit=str_to_class(
module_name="torch.nn",
class_name=config["model"]["rnn"]["rnn_unit"],
),
input_size=config["model"]["embedding"]["embedding_dim"], # ref to emb_dim
hidden_size=config["model"]["rnn"]["hidden_size"],
num_layers=config["model"]["rnn"]["num_layers"],
dropout=config["model"]["rnn"]["dropout"],
Expand All @@ -169,9 +170,12 @@ def main(path_to_config: str):
),
)

# TODO: add model architecture in config
# TODO: add attention if needed
model = BiLSTM(
model_class = str_to_class(
module_name="pytorch_ner.nn_modules.architecture",
class_name=config["model"]["architecture"],
)
model = model_class(
embedding_layer=embedding_layer,
rnn_layer=rnn_layer,
linear_head=linear_head,
Expand Down
14 changes: 14 additions & 0 deletions pytorch_ner/utils.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import importlib
import os
import random
import shutil
Expand Down Expand Up @@ -44,3 +45,16 @@ def rmdir(path: str):

if os.path.exists(path):
shutil.rmtree(path)


def str_to_class(module_name, class_name):
    """Resolve a module path and attribute name to the object itself.

    Imports ``module_name`` (raises ImportError if the module cannot be
    loaded) and returns its attribute ``class_name`` (raises AttributeError
    if no such attribute exists).
    https://stackoverflow.com/questions/1176136/convert-string-to-python-class-object
    """
    target_module = importlib.import_module(module_name)
    return getattr(target_module, class_name)
3 changes: 2 additions & 1 deletion tests/test_onnx.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from test_nn_modules.test_architecture import model_bilstm as model

from pytorch_ner.onnx import onnx_export_and_check
from pytorch_ner.utils import mkdir
from tests.test_nn_modules.test_architecture import model_bilstm as model

path_to_save = "models/model.onnx"
mkdir("models")
Expand Down