From 39aae8a5373134aadfc2554d693ab9b54a977443 Mon Sep 17 00:00:00 2001
From: Isaac Corley <22203655+isaaccorley@users.noreply.github.com>
Date: Sun, 1 Aug 2021 22:22:35 -0500
Subject: [PATCH] formatting fix

---
 torchrs/datasets/eurosat.py |  4 ++--
 torchrs/datasets/fair1m.py  |  3 ++-
 torchrs/models/__init__.py  |  2 +-
 torchrs/models/fc_cd.py     |  3 +--
 torchrs/models/oscd.py      | 10 ++++------
 torchrs/models/rams.py      |  2 --
 torchrs/transforms.py       |  2 +-
 7 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/torchrs/datasets/eurosat.py b/torchrs/datasets/eurosat.py
index 2ac8393..9885d6f 100644
--- a/torchrs/datasets/eurosat.py
+++ b/torchrs/datasets/eurosat.py
@@ -11,7 +11,7 @@ class EuroSATRGB(ImageFolder):
     """ Sentinel-2 RGB Land Cover Classification dataset from 'EuroSAT: A Novel Dataset and Deep Learning
     Benchmark for Land Use and Land Cover Classification', Helber at al. (2017)
     https://arxiv.org/abs/1709.00029
-    
+
     'We present a novel dataset based on Sentinel-2 satellite images covering 13 spectral
     bands and consisting out of 10 classes with in total 27,000 labeled and
     geo-referenced images.'
@@ -32,7 +32,7 @@ class EuroSATMS(ImageFolder):
     """ Sentinel-2 RGB Land Cover Classification dataset from 'EuroSAT: A Novel Dataset and Deep Learning
     Benchmark for Land Use and Land Cover Classification', Helber at al. (2017)
     https://arxiv.org/abs/1709.00029
-    
+
     'We present a novel dataset based on Sentinel-2 satellite images covering 13 spectral
     bands and consisting out of 10 classes with in total 27,000 labeled and
     geo-referenced images.'
diff --git a/torchrs/datasets/fair1m.py b/torchrs/datasets/fair1m.py
index 2eecaec..a831b0c 100644
--- a/torchrs/datasets/fair1m.py
+++ b/torchrs/datasets/fair1m.py
@@ -1,7 +1,7 @@
 import os
 from glob import glob
 from xml.etree import ElementTree
-from typing import List, Dict, Tuple
+from typing import List, Dict
 
 import torch
 import numpy as np
@@ -74,6 +74,7 @@ class FAIR1M(torch.utils.data.Dataset):
         "Intersection": {"id": 35, "category": "Road"},
         "Bridge": {"id": 36, "category": "Road"}
     }
+
     def __init__(
         self,
         root: str = ".data/fair1m",
diff --git a/torchrs/models/__init__.py b/torchrs/models/__init__.py
index 3cb8212..911b5cc 100644
--- a/torchrs/models/__init__.py
+++ b/torchrs/models/__init__.py
@@ -4,5 +4,5 @@
 
 
 __all__ = [
-    "RAMS", "EarlyFusion", "FCEF", "FCSiamConc", "FCSiamDiff"
+    "RAMS", "EarlyFusion", "Siam", "FCEF", "FCSiamConc", "FCSiamDiff"
 ]
diff --git a/torchrs/models/fc_cd.py b/torchrs/models/fc_cd.py
index 37791ef..7b7cd71 100644
--- a/torchrs/models/fc_cd.py
+++ b/torchrs/models/fc_cd.py
@@ -25,7 +25,6 @@ def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
         return self.pool(x), x
 
 
-
 class DeConvBlock(nn.Sequential):
 
     def __init__(self, filters: List[int], kernel_size: int = 3, dropout: float = 0.2):
@@ -154,7 +153,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
 
         # Concat skips
         skips = [rearrange(skip, "(b t) c h w -> b (t c) h w", t=t) for skip in skips]
-        
+
         # Only first input encoding is passed directly to decoder
         x = rearrange(x, "(b t) c h w -> b t c h w", t=t)
         x = x[:, 0, ...]
diff --git a/torchrs/models/oscd.py b/torchrs/models/oscd.py
index 8a3e0ca..db99c8b 100644
--- a/torchrs/models/oscd.py
+++ b/torchrs/models/oscd.py
@@ -26,7 +26,6 @@ class EarlyFusion(nn.Module):
     def __init__(self, channels: int = 3, t: int = 2, num_classes: int = 2):
         super().__init__()
         filters = [channels * t, 32, 32, 64, 64, 128, 128]
-        kernel_size = 3
         dropout = 0.2
         self.encoder = nn.Sequential(
             *[ConvBlock(filters[i-1], filters[i]) for i in range(1, len(filters))],
@@ -35,9 +34,9 @@ def __init__(self, channels: int = 3, t: int = 2, num_classes: int = 2):
         )
         self.mlp = nn.Sequential(
             nn.Linear(128, 8),
-            nn.BatchNorm1d(8), 
+            nn.BatchNorm1d(8),
             nn.ReLU(),
-            nn.Dropout2d(dropout), 
+            nn.Dropout2d(dropout),
             nn.Linear(8, num_classes)
         )
 
@@ -60,7 +59,6 @@ class Siam(nn.Module):
     def __init__(self, channels: int = 3, t: int = 2, num_classes: int = 2):
         super().__init__()
         filters = [channels, 64, 64, 128]
-        kernel_size = 3
         dropout = 0.2
         self.encoder = nn.Sequential(
             *[ConvBlock(filters[i-1], filters[i]) for i in range(1, len(filters))],
@@ -68,9 +66,9 @@ def __init__(self, channels: int = 3, t: int = 2, num_classes: int = 2):
         )
         self.mlp = nn.Sequential(
             nn.Linear(t*128*7*7, 64),
-            nn.BatchNorm1d(64), 
+            nn.BatchNorm1d(64),
             nn.ReLU(),
-            nn.Dropout2d(dropout), 
+            nn.Dropout2d(dropout),
             nn.Linear(64, num_classes)
         )
 
diff --git a/torchrs/models/rams.py b/torchrs/models/rams.py
index 5471126..5903a3b 100644
--- a/torchrs/models/rams.py
+++ b/torchrs/models/rams.py
@@ -1,6 +1,4 @@
 """ Referenced from official TF implementation https://github.com/EscVM/RAMS/blob/master/utils/network.py """
-import math
-
 import torch
 import torch.nn as nn
 from einops import rearrange
diff --git a/torchrs/transforms.py b/torchrs/transforms.py
index 5ad8945..bd5afa9 100644
--- a/torchrs/transforms.py
+++ b/torchrs/transforms.py
@@ -49,7 +49,7 @@ def __call__(self, x: np.ndarray) -> torch.Tensor:
 
 
 class ToDtype(object):
-    
+
     def __init__(self, dtype: torch.dtype):
         self.dtype = dtype
 