@@ -10897,10 +10897,6 @@ def helper(self, device, dtype, ptype, t_transform, std_transform):
     @dtypes(torch.float, torch.double, torch.half)
     @dtypesIfCUDA(torch.float, torch.double, torch.half, torch.bfloat16)
     def test_uniform_from_to(self, device, dtype):
-        # TODO: https://github.com/pytorch/pytorch/issues/33793
-        if IS_WINDOWS and device.startswith('cuda') and dtype == torch.bfloat16:
-            raise unittest.SkipTest("Crashes with CUDA error: unspecified launch failure")
-
         size = 2000
         alpha = 0.1
 
@@ -11119,10 +11115,6 @@ def test_exp(self, device, dtype):
     @skipIfNoSciPy
     @dtypes(*torch.testing.get_all_fp_dtypes())
     def test_uniform_kstest(self, device, dtype):
-        # TODO: https://github.com/pytorch/pytorch/issues/33793
-        if IS_WINDOWS and device.startswith('cuda') and dtype == torch.bfloat16:
-            raise unittest.SkipTest("Crashes with CUDA error: unspecified launch failure")
-
         from scipy import stats
         size = 1000
         for from_ in [-42, 0, 4.2]:
@@ -12244,10 +12236,7 @@ def test_bool_tensor_value_change(self, device):
     def test_unfold_all_devices_and_dtypes(self, device):
         for dt in torch.testing.get_all_dtypes():
 
-            if dt == torch.bfloat16 and device.startswith('cuda') and IS_WINDOWS:
-                # TODO: https://github.com/pytorch/pytorch/issues/33793
-                self.assertRaises(RuntimeError, lambda: torch.randint(5, (0, 1, 3, 0), dtype=dt, device=device))
-            elif dt == torch.bool:
+            if dt == torch.bool:
                 x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
                 self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
             else:
@@ -17629,10 +17618,6 @@ def test_random_from_to_bool(self, device):
 
     @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
     def test_random_full_range(self, device, dtype):
-        # TODO: https://github.com/pytorch/pytorch/issues/33793
-        if IS_WINDOWS and device.startswith('cuda') and dtype == torch.bfloat16:
-            raise unittest.SkipTest("Crashes with CUDA error: unspecified launch failure")
-
         size = 2000
         alpha = 0.1
 
@@ -17667,10 +17652,6 @@ def test_random_full_range(self, device, dtype):
 
     @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
     def test_random_from_to(self, device, dtype):
-        # TODO: https://github.com/pytorch/pytorch/issues/33793
-        if IS_WINDOWS and device.startswith('cuda') and dtype == torch.bfloat16:
-            raise unittest.SkipTest("Crashes with CUDA error: unspecified launch failure")
-
         size = 2000
         alpha = 0.1
 
@@ -17760,10 +17741,6 @@ def test_random_from_to(self, device, dtype):
 
     @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
     def test_random_to(self, device, dtype):
-        # TODO: https://github.com/pytorch/pytorch/issues/33793
-        if IS_WINDOWS and device.startswith('cuda') and dtype == torch.bfloat16:
-            raise unittest.SkipTest("Crashes with CUDA error: unspecified launch failure")
-
         size = 2000
         alpha = 0.1
 
@@ -17822,10 +17799,6 @@ def test_random_to(self, device, dtype):
 
     @dtypes(*(torch.testing.get_all_int_dtypes() + torch.testing.get_all_fp_dtypes()))
     def test_random_default(self, device, dtype):
-        # TODO: https://github.com/pytorch/pytorch/issues/33793
-        if IS_WINDOWS and device.startswith('cuda') and dtype == torch.bfloat16:
-            raise unittest.SkipTest("Crashes with CUDA error: unspecified launch failure")
-
         size = 2000
         alpha = 0.1
 