Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

OS X - No module named 'model' #20

Open
krummrey opened this issue Jan 21, 2024 · 1 comment
Open

OS X - No module named 'model' #20

krummrey opened this issue Jan 21, 2024 · 1 comment

Comments

@krummrey
Copy link

I've had it working for over a year. Now I get

Traceback (most recent call last):
  File "/informative-drawings/test.py", line 15, in <module>
    from model import Generator, GlobalGenerator2, InceptionV3
ModuleNotFoundError: No module named 'model'

Is it a simple path issue? Any idea on how to fix it?

@krummrey
Copy link
Author

To answer my own question, I've used Claude 3.5 to solve it for me. This simplified test script works for me on OS X

#!/usr/bin/python3

import argparse
import sys
import os

import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch

from model import Generator
from dataset import UnpairedDepthDataset
from PIL import Image

# Command-line interface for the simplified test script: experiment
# identification, data locations, generator architecture, and preprocessing.
arg_parser = argparse.ArgumentParser()

# Experiment / path options.
arg_parser.add_argument('--name', required=True, type=str, help='name of this experiment')
arg_parser.add_argument('--checkpoints_dir', type=str, default='checkpoints', help='Where the model checkpoints are saved')
arg_parser.add_argument('--results_dir', type=str, default='results', help='where to save result images')
arg_parser.add_argument('--batchSize', type=int, default=1, help='size of the batches')
arg_parser.add_argument('--dataroot', type=str, default='', help='root directory of the dataset')

# Network architecture options.
arg_parser.add_argument('--input_nc', type=int, default=3, help='number of channels of input data')
arg_parser.add_argument('--output_nc', type=int, default=1, help='number of channels of output data')
arg_parser.add_argument('--n_blocks', type=int, default=3, help='number of resnet blocks for generator')
arg_parser.add_argument('--size', type=int, default=256, help='size of the data (squared assumed)')
arg_parser.add_argument('--use_gpu', action='store_true', help='use GPU computation if available')
arg_parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
arg_parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load from')
arg_parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')

# Dataset / preprocessing options.
arg_parser.add_argument('--mode', type=str, default='test', help='train, val, test, etc')
arg_parser.add_argument('--load_size', type=int, default=256, help='scale images to this size')
arg_parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
arg_parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
arg_parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
arg_parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
arg_parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')

arg_parser.add_argument('--how_many', type=int, default=100, help='number of images to test')

# Parsed options are consumed by the inference section below.
opt = arg_parser.parse_args()
print(opt)

# ---------------------------------------------------------------------------
# Inference: load the generator checkpoint, run it over the test dataset,
# and save one output image per input.
# ---------------------------------------------------------------------------

# Prefer the Apple-silicon GPU (MPS) when available; otherwise fall back to CPU.
device = torch.device("mps") if torch.backends.mps.is_available() else torch.device("cpu")
print(f"Using device: {device}")

with torch.no_grad():  # inference only — no gradients needed
    # Generator network.
    net_G = Generator(opt.input_nc, opt.output_nc, opt.n_blocks).to(device)

    # Load the checkpoint. Path is built once and reused for the log message
    # (the original duplicated the os.path.join expression).
    checkpoint_path = os.path.join(opt.checkpoints_dir, opt.name, f'netG_A_{opt.which_epoch}.pth')
    net_G.load_state_dict(torch.load(checkpoint_path, map_location=device, weights_only=True))
    print(f'Loaded model from {checkpoint_path}')

    net_G.eval()

    transforms_r = [
        transforms.Resize(int(opt.size), Image.BICUBIC),
        transforms.ToTensor()
    ]

    test_data = UnpairedDepthDataset(
        opt.dataroot, '', opt, transforms_r=transforms_r,
        mode=opt.mode
    )

    dataloader = DataLoader(test_data, batch_size=opt.batchSize, shuffle=False)

    full_output_dir = os.path.join(opt.results_dir, opt.name)
    os.makedirs(full_output_dir, exist_ok=True)

    for i, batch in enumerate(dataloader):
        # BUG FIX: the original tested `i > opt.how_many`, which generated
        # how_many + 1 images; `>=` honors the --how_many limit exactly.
        if i >= opt.how_many:
            break
        # torch.autograd.Variable is a deprecated no-op wrapper since
        # PyTorch 0.4 — tensors can be moved to the device directly.
        real_A = batch['r'].to(device)

        name = batch['name'][0]  # assumes batchSize == 1 (the default) — first name only
        image = net_G(real_A)
        save_image(image.data, os.path.join(full_output_dir, f'{name}_out.png'))

        sys.stdout.write(f'\rGenerated images {i+1:04d} of {opt.how_many:04d}')

    sys.stdout.write('\n')

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant