Commit 5d748e6

Mikhail Zolotukhin authored and facebook-github-bot committed on Sep 7, 2020
[TensorExpr] Re-enable tests. (pytorch#44218)
Summary: Pull Request resolved: pytorch#44218
Differential Revision: D23546100
Test Plan: Imported from OSS
Reviewed By: ngimel
Pulled By: ZolotukhinM
fbshipit-source-id: 4c4c5378ec9891ef72b60ffb59081a009e0df049
1 parent 589a202 commit 5d748e6

File tree

1 file changed: +16 -31 lines
 

test/test_tensorexpr.py

+16 -31
@@ -986,7 +986,6 @@ def test_cat_cpu(self):
     def test_cat_cuda(self):
         self._test_cat('cuda')
 
-    @unittest.skip("temporarily disable")
     def test_scalar(self):
         @torch.jit.script
         def test_float(x, y, z, a, b):
@@ -1010,23 +1009,22 @@ def test_int(x, y, z, a, b):
         # FIXME: interp.elapsed_value() also increments due to simplifier
         assert llvm.elapsed_value() == 1 or interp.elapsed_value() > 1
 
-    # FIXME: Blocked on profiling executor changes
-    # def test_loop():
-    #     @torch.jit.script
-    #     def test(x, y, z):
-    #         # type: (Tensor, Tensor, int) -> Tensor
-    #         b = y
-    #         for i in range(0, z):
-    #             a = x + y
-    #             b = b + y
-    #         return b
-    #
-    #     llvm = LLVMCodeGenExecuted()
-    #     interp = SimpleIREvalExecuted()
-    #     x, y, z = (torch.zeros(32, 32), torch.ones(32, 32), 4)
-    #     test(x, y, z)
-    #     r = test(x, y, z)
-    #     assert llvm.elapsed_value == 1 or interp.elapsed_value() > 1
+    def test_loop(self):
+        @torch.jit.script
+        def test(x, y, z):
+            # type: (Tensor, Tensor, int) -> Tensor
+            b = y
+            for i in range(0, z):
+                a = x + y
+                b = b + y
+            return b
+
+        llvm = LLVMCodeGenExecuted()
+        interp = SimpleIREvalExecuted()
+        x, y, z = (torch.zeros(32, 32), torch.ones(32, 32), 4)
+        test(x, y, z)
+        r = test(x, y, z)
+        assert llvm.elapsed_value == 1 or interp.elapsed_value() > 1
 
     @unittest.skip("no shape inference for aten::slice yet")
     def test_slice(self):
@@ -1195,19 +1193,6 @@ def run_where(x, y):
         y = run_where(a, b)
         np.testing.assert_allclose(x.numpy(), y.numpy())
 
-    @unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
-    @unittest.skip("temporarily disable")
-    def test_unused(self):
-        def test(x, y):
-            return x * x + torch.rand_like(y)
-        a = torch.rand(1, device="cuda")
-        b = torch.rand(1, device="cuda")
-        scripted = torch.jit.script(test)
-        scripted(a, b)
-        cx = CudaCodeGenExecuted()
-        scripted(a, b)
-        assert cx.elapsed_value() == 0
-
     @unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
     def test_multi_rand(self):
         def test(x):
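
The re-enabled test_loop follows the execution-counter pattern used throughout this file: script a small function, call it once so the profiling executor can record profiling information, call it again so the optimized (fused) graph runs, and then check that either the LLVM codegen or the simple IR interpreter actually executed a TensorExpr kernel. The snippet below is a minimal standalone sketch of that pattern, not part of the commit; it assumes the LLVMCodeGenExecuted and SimpleIREvalExecuted helpers defined earlier in test/test_tensorexpr.py are importable, and that the TensorExpr fuser is enabled (the real test's setup handles that).

# Minimal sketch of the counter pattern behind test_loop (assumptions noted above).
import torch
from test_tensorexpr import LLVMCodeGenExecuted, SimpleIREvalExecuted  # assumes test/ is on sys.path

@torch.jit.script
def loop_add(x, y, z: int):
    # Accumulate into b in a loop; simple elementwise math the TensorExpr fuser can handle.
    b = y
    for i in range(z):
        b = b + x + y
    return b

llvm = LLVMCodeGenExecuted()     # snapshots the LLVM-codegen execution counter
interp = SimpleIREvalExecuted()  # snapshots the IR-interpreter execution counter
x, y = torch.zeros(32, 32), torch.ones(32, 32)
loop_add(x, y, 4)  # first call: profiling run
loop_add(x, y, 4)  # second call: optimized graph, fused kernel executes here
# Mirrors the test's check: one of the two backends must have run the fused kernel.
assert llvm.elapsed_value() == 1 or interp.elapsed_value() > 1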
