Creates spectral ops test suite (pytorch#42157)
Summary:
In preparation for creating the new torch.fft namespace and its NumPy-like fft functions, and to support our goal of refactoring and reducing the size of test_torch.py, this PR creates a dedicated test suite for our spectral ops.

The existing spectral op tests from test_torch.py and test_cuda.py are moved to test_spectral_ops.py and updated to run under the device generic test framework.
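
As a rough illustration of what "running under the device generic test framework" means, here is a minimal sketch of a spectral-ops test written against PyTorch's common_device_type helpers; the class name, test name, shapes, and the use of the old torch.fft(input, signal_ndim) API are illustrative assumptions, not taken from the new file:

```python
# Minimal sketch of a device-generic test (illustrative only).
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, dtypes)


class TestSpectralOps(TestCase):
    @dtypes(torch.double)
    def test_fft_ifft_round_trip(self, device, dtype):
        # Old-style complex input: last dimension of size 2 holds (real, imag).
        x = torch.randn(4, 8, 2, device=device, dtype=dtype)
        y = torch.ifft(torch.fft(x, signal_ndim=1), signal_ndim=1)
        self.assertEqual(x, y)


# Generates TestSpectralOpsCPU, TestSpectralOpsCUDA, ... from the generic class.
instantiate_device_type_tests(TestSpectralOps, globals())

if __name__ == '__main__':
    run_tests()
```

Each generic test is instantiated once per available device type, so a single definition covers both the CPU and CUDA variants that previously lived separately in test_torch.py and test_cuda.py.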

Pull Request resolved: pytorch#42157

Reviewed By: albanD

Differential Revision: D22811096

Pulled By: mruberry

fbshipit-source-id: e5c50f0016ea6bb8b093cd6df2dbcef6db9bb6b6
Mike Ruberry authored and facebook-github-bot committed Jul 29, 2020
1 parent 029007c commit 4b6e5f4
Showing 4 changed files with 519 additions and 491 deletions.
1 change: 1 addition & 0 deletions test/run_test.py
@@ -54,6 +54,7 @@
    'test_vulkan',
    'test_quantization',
    'test_sparse',
    'test_spectral_ops',
    'test_serialization',
    'test_show_pickle',
    'test_torch',
72 changes: 0 additions & 72 deletions test/test_cuda.py
@@ -6,7 +6,6 @@
from itertools import repeat, chain
import os
import gc
from contextlib import contextmanager
import threading
import queue
import pickle
@@ -1366,77 +1365,6 @@ def test_prod_large(self):
        x = torch.ones(240000, device='cuda', dtype=torch.float32)
        self.assertEqual(x.prod(), 1)

    @skipIfRocm
    def test_fft_ifft_rfft_irfft(self):
        AbstractTestCases._TestTorchMixin._test_fft_ifft_rfft_irfft(self, device=torch.device('cuda'))

        @contextmanager
        def plan_cache_max_size(n, device=None):
            if device is None:
                plan_cache = torch.backends.cuda.cufft_plan_cache
            else:
                plan_cache = torch.backends.cuda.cufft_plan_cache[device]
            original = plan_cache.max_size
            plan_cache.max_size = n
            yield
            plan_cache.max_size = original

        with plan_cache_max_size(max(1, torch.backends.cuda.cufft_plan_cache.size - 10)):
            AbstractTestCases._TestTorchMixin._test_fft_ifft_rfft_irfft(self, device=torch.device('cuda'))

        with plan_cache_max_size(0):
            AbstractTestCases._TestTorchMixin._test_fft_ifft_rfft_irfft(self, device=torch.device('cuda'))

        torch.backends.cuda.cufft_plan_cache.clear()

        # check that it still works after clearing cache
        with plan_cache_max_size(10):
            AbstractTestCases._TestTorchMixin._test_fft_ifft_rfft_irfft(self, device=torch.device('cuda'))

        with self.assertRaisesRegex(RuntimeError, r"must be non-negative"):
            torch.backends.cuda.cufft_plan_cache.max_size = -1

        with self.assertRaisesRegex(RuntimeError, r"read-only property"):
            torch.backends.cuda.cufft_plan_cache.size = -1

        with self.assertRaisesRegex(RuntimeError, r"but got device with index"):
            torch.backends.cuda.cufft_plan_cache[torch.cuda.device_count() + 10]

        if TEST_MULTIGPU:
            # Test that different GPU has different cache
            x0 = torch.randn(2, 3, 3, device='cuda:0')
            x1 = x0.cuda(1)
            self.assertEqual(x0.rfft(2), x1.rfft(2))
            # If a plan is used across different devices, the following line (or
            # the assert above) would trigger illegal memory access. Other ways
            # to trigger the error include
            # (1) setting CUDA_LAUNCH_BLOCKING=1 (pytorch/pytorch#19224) and
            # (2) printing a device 1 tensor.
            x0.copy_(x1)

            # Test that un-indexed `torch.backends.cuda.cufft_plan_cache` uses current device
            with plan_cache_max_size(10, device='cuda:0'):
                with plan_cache_max_size(11, device='cuda:1'):
                    self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
                    self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)

                    self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10)  # default is cuda:0
                    with torch.cuda.device(1):
                        self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11)  # default is cuda:1
                        with torch.cuda.device(0):
                            self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10)  # default is cuda:0

                self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
                with torch.cuda.device(1):
                    with plan_cache_max_size(11):  # default is cuda:1
                        self.assertEqual(torch.backends.cuda.cufft_plan_cache[0].max_size, 10)
                        self.assertEqual(torch.backends.cuda.cufft_plan_cache[1].max_size, 11)

                        self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11)  # default is cuda:1
                        with torch.cuda.device(0):
                            self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 10)  # default is cuda:0
                        self.assertEqual(torch.backends.cuda.cufft_plan_cache.max_size, 11)  # default is cuda:1

    def test_multinomial_ext(self):
        # Test two corner cases from older PyTorch (Issue #4858)
        freqs = torch.cuda.FloatTensor([
(The diffs for the remaining two changed files, test/test_spectral_ops.py and test/test_torch.py, are not expanded in this view.)
