This repository has been archived by the owner on Dec 16, 2022. It is now read-only.

Commit

updates for torch 1.6 (#103)
* updates for torch 1.6

* Update requirements.txt
epwalsh authored Jul 31, 2020
1 parent e7b8247 commit 04561a8
Showing 5 changed files with 7 additions and 24 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## Unreleased
 
+### Changed
+
+- Updated to PyTorch 1.6.
+
 ### Fixed
 
 - Updated the RoBERTa SST config to make proper use of the CLS token
3 changes: 0 additions & 3 deletions dev-requirements.txt
@@ -19,9 +19,6 @@ codecov
 # For running tests that aren't 100% reliable
 flaky
 
-# Required for automatic mixed precision (AMP) training
-git+https://github.com/NVIDIA/apex.git@master
-
 #### DOC-RELATED PACKAGES ####
 
 # YAML manipulation
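Dropping the apex pin works because PyTorch 1.6 ships automatic mixed precision natively in torch.cuda.amp, so no external package is needed for AMP training. As a rough illustration only (the tiny model, optimizer, and fake data below are placeholders, not anything from this repository), a native AMP training step looks roughly like this:

import torch
from torch.cuda.amp import GradScaler, autocast

# Placeholder model and data, just to show the shape of a native AMP loop.
model = torch.nn.Linear(16, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = GradScaler()

for _ in range(3):
    inputs = torch.randn(8, 16, device="cuda")
    targets = torch.randint(0, 2, (8,), device="cuda")
    optimizer.zero_grad()
    with autocast():  # forward pass runs in mixed precision
        loss = torch.nn.functional.cross_entropy(model(inputs), targets)
    scaler.scale(loss).backward()  # scale the loss to avoid fp16 gradient underflow
    scaler.step(optimizer)  # unscales gradients, then steps the optimizer
    scaler.update()  # adjusts the scale factor for the next iteration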
8 changes: 1 addition & 7 deletions tests/mc/models/transformer_mc_test.py
@@ -5,11 +5,6 @@
 from tests import FIXTURES_ROOT
 import pytest
 
-try:
-    from apex import amp
-except ImportError:
-    amp = None
-
 import allennlp_models.mc.models
 
 
@@ -53,11 +48,10 @@ def test_forward_pass_runs_correctly(self):
 
 
 @requires_gpu
-@pytest.mark.skipif(amp is None, reason="Apex is not installed.")
 class TransformerMcMixedPrecisionTest(AllenNlpTestCase):
     def test_model_can_train_save_and_load_with_mixed_precision(self):
         train_model_from_file(
             FIXTURES_ROOT / "mc" / "transformer_mc" / "experiment.jsonnet",
             self.TEST_DIR,
-            overrides="{'trainer.opt_level':'O2','trainer.cuda_device':0}",
+            overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
         )
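The only behavioral change in each mixed-precision test is the overrides string: apex's 'trainer.opt_level' option is replaced by the trainer's boolean 'trainer.use_amp' flag, and the skipif guard on apex goes away. The same substitution appears in the bidaf and transformer_qa tests below. A minimal sketch of the same call outside the test harness, with hypothetical paths (the real tests use FIXTURES_ROOT and self.TEST_DIR):

from allennlp.commands.train import train_model_from_file

# Hypothetical config path and output directory, shown only to illustrate the new overrides.
train_model_from_file(
    "test_fixtures/mc/transformer_mc/experiment.jsonnet",
    "/tmp/transformer_mc_amp",
    overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
)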
8 changes: 1 addition & 7 deletions tests/rc/models/bidaf_test.py
@@ -4,11 +4,6 @@
 from numpy.testing import assert_almost_equal
 import torch
 
-try:
-    from apex import amp
-except ImportError:
-    amp = None
-
 from allennlp.commands.train import train_model_from_file
 from allennlp.common import Params
 from allennlp.common.checks import ConfigurationError
@@ -141,12 +136,11 @@ def test_mismatching_dimensions_throws_configuration_error(self):
 
 
 @requires_gpu
-@pytest.mark.skipif(amp is None, reason="Apex is not installed.")
 class BidirectionalAttentionFlowMixedPrecisionTest(AllenNlpTestCase):
     @flaky(max_runs=5)
     def test_model_can_train_save_and_load_with_mixed_precision(self):
         train_model_from_file(
             FIXTURES_ROOT / "rc" / "bidaf" / "experiment.json",
             self.TEST_DIR,
-            overrides="{'trainer.opt_level':'O2','trainer.cuda_device':0}",
+            overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
         )
8 changes: 1 addition & 7 deletions tests/rc/models/transformer_qa_test.py
@@ -6,11 +6,6 @@
 from tests import FIXTURES_ROOT
 import pytest
 
-try:
-    from apex import amp
-except ImportError:
-    amp = None
-
 import allennlp_models.rc
 
 
@@ -59,11 +54,10 @@ def test_forward_pass_runs_correctly(self):
 
 
 @requires_gpu
-@pytest.mark.skipif(amp is None, reason="Apex is not installed.")
 class TransformerQaMixedPrecisionTest(AllenNlpTestCase):
     def test_model_can_train_save_and_load_with_mixed_precision(self):
         train_model_from_file(
             FIXTURES_ROOT / "rc" / "transformer_qa" / "experiment.jsonnet",
             self.TEST_DIR,
-            overrides="{'trainer.opt_level':'O2','trainer.cuda_device':0}",
+            overrides="{'trainer.use_amp':true,'trainer.cuda_device':0}",
         )
