
Commit 4972cf0

Meghan Lele authored and facebook-github-bot committed on Jul 14, 2020
[JIT] Add out-of-source-tree to_backend tests (pytorch#41145)
Summary: Pull Request resolved: pytorch#41145

**Summary**
This commit adds out-of-source-tree tests for `to_backend`. These tests check that a Module can be lowered to a backend, exported, loaded (in both Python and C++), and executed.

**Fixes**
This commit fixes pytorch#40067.

Test Plan: Imported from OSS

Reviewed By: jamesr66a

Differential Revision: D22510076

Pulled By: SplitInfinity

fbshipit-source-id: f65964ef3092a095740f06636ed5b1eb0884492d
1 parent 0e7b9d4 commit 4972cf0

12 files changed: +366 −1 lines
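The flow these tests exercise — script a Module, lower it to a named backend, serialize it, reload it, and execute it — can be sketched in a few lines of Python. This is a minimal sketch, assuming a backend registered under the name "custom_backend" has already been loaded into the process (as the library added below provides) and that an empty compile spec is acceptable to it:

import torch

class Model(torch.nn.Module):
    def forward(self, a, b):
        return (a + b, a - b)

# Lower only the forward method; the compile spec is backend-specific
# (empty here).
scripted = torch.jit.script(Model())
lowered = torch._C._jit_to_backend(
    "custom_backend", scripted._c, {"forward": {"": ""}})

# Lowered modules round-trip through the standard serialization APIs.
torch.jit.save(lowered, "model.pt")
loaded = torch.jit.load("model.pt")
print(loaded(torch.ones(5), torch.ones(5)))  # executed via the backend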
 

.jenkins/pytorch/build.sh (+11)

@@ -233,6 +233,17 @@ else
   make VERBOSE=1
   popd
   assert_git_not_dirty
+
+  # Build custom backend tests.
+  CUSTOM_BACKEND_BUILD="$PWD/../custom-backend-build"
+  CUSTOM_BACKEND_TEST="$PWD/test/custom_backend"
+  python --version
+  mkdir "$CUSTOM_BACKEND_BUILD"
+  pushd "$CUSTOM_BACKEND_BUILD"
+  cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPYTHON_EXECUTABLE="$(which python)"
+  make VERBOSE=1
+  popd
+  assert_git_not_dirty
 else
   # Test standalone c10 build
   if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda10.1-cudnn7-py3* ]]; then

.jenkins/pytorch/macos-test.sh (+22)

@@ -99,6 +99,26 @@ test_libtorch() {
   fi
 }
 
+test_custom_backend() {
+  echo "Testing custom backends"
+  pushd test/custom_backend
+  rm -rf build && mkdir build
+  pushd build
+  SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
+  CMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" cmake ..
+  make VERBOSE=1
+  popd
+
+  # Run Python tests and export a lowered module.
+  python test_custom_backend.py -v
+  python backend.py --export-module-to=model.pt
+  # Run C++ tests using the exported module.
+  build/test_custom_backend ./model.pt
+  rm -f ./model.pt
+  popd
+  assert_git_not_dirty
+}
+
 test_custom_script_ops() {
   echo "Testing custom script operators"
   pushd test/custom_operator
@@ -124,11 +144,13 @@ if [ -z "${BUILD_ENVIRONMENT}" ] || [[ "${BUILD_ENVIRONMENT}" == *-test ]]; then
   test_python_all
   test_libtorch
   test_custom_script_ops
+  test_custom_backend
 else
   if [[ "${BUILD_ENVIRONMENT}" == *-test1 ]]; then
     test_python_all
   elif [[ "${BUILD_ENVIRONMENT}" == *-test2 ]]; then
     test_libtorch
     test_custom_script_ops
+    test_custom_backend
   fi
 fi

.jenkins/pytorch/test.sh (+19)

@@ -228,6 +228,23 @@ test_libtorch() {
   fi
 }
 
+test_custom_backend() {
+  if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *asan* ]] ; then
+    echo "Testing custom backends"
+    CUSTOM_BACKEND_BUILD="$PWD/../custom-backend-build"
+    pushd test/custom_backend
+    cp -a "$CUSTOM_BACKEND_BUILD" build
+    # Run tests Python-side and export a lowered module.
+    python test_custom_backend.py -v
+    python backend.py --export-module-to=model.pt
+    # Run tests C++-side and load the exported lowered module.
+    build/test_custom_backend ./model.pt
+    rm -f ./model.pt
+    popd
+    assert_git_not_dirty
+  fi
+}
+
 test_custom_script_ops() {
   if [[ "$BUILD_ENVIRONMENT" != *rocm* ]] && [[ "$BUILD_ENVIRONMENT" != *asan* ]] ; then
     echo "Testing custom script operators"
@@ -331,6 +348,7 @@ elif [[ "${BUILD_ENVIRONMENT}" == *-test2 || "${JOB_BASE_NAME}" == *-test2 ]]; then
   test_aten
   test_libtorch
   test_custom_script_ops
+  test_custom_backend
   test_torch_function_benchmark
 elif [[ "${BUILD_ENVIRONMENT}" == *-bazel-* ]]; then
   test_bazel
@@ -346,5 +364,6 @@ else
   test_aten
   test_libtorch
   test_custom_script_ops
+  test_custom_backend
   test_torch_function_benchmark
 fi

.jenkins/pytorch/win-test-helpers/test_custom_backend.bat (+36, new file)

@@ -0,0 +1,36 @@
+call %SCRIPT_HELPERS_DIR%\setup_pytorch_env.bat
+
+git submodule update --init --recursive third_party/pybind11
+cd test\custom_backend
+
+:: Build the custom backend library.
+mkdir build
+pushd build
+
+echo "Executing CMake for custom_backend test..."
+
+:: Note: Caffe2 does not support MSVC + CUDA + Debug mode (has to be Release mode)
+cmake -DCMAKE_PREFIX_PATH=%TMP_DIR_WIN%\build\torch -DCMAKE_BUILD_TYPE=Release -GNinja ..
+if ERRORLEVEL 1 exit /b 1
+
+echo "Executing Ninja for custom_backend test..."
+
+ninja -v
+if ERRORLEVEL 1 exit /b 1
+
+echo "Ninja succeeded for custom_backend test."
+
+popd
+
+:: Run tests Python-side and export a script module.
+python test_custom_backend.py -v
+if ERRORLEVEL 1 exit /b 1
+
+python backend.py --export-module-to="build/model.pt"
+if ERRORLEVEL 1 exit /b 1
+
+:: Run tests C++-side and load the exported script module.
+cd build
+set PATH=C:\Program Files\NVIDIA Corporation\NvToolsExt\bin\x64;%TMP_DIR_WIN%\build\torch\lib;%PATH%
+test_custom_backend.exe model.pt
+if ERRORLEVEL 1 exit /b 1
.jenkins/pytorch/win-test.sh (+2)

@@ -41,13 +41,15 @@ run_tests() {
     $SCRIPT_HELPERS_DIR/test_python_nn.bat "$DETERMINE_FROM" && \
     $SCRIPT_HELPERS_DIR/test_python_all_except_nn.bat "$DETERMINE_FROM" && \
     $SCRIPT_HELPERS_DIR/test_custom_script_ops.bat && \
+    $SCRIPT_HELPERS_DIR/test_custom_backend.bat && \
     $SCRIPT_HELPERS_DIR/test_libtorch.bat
   else
     if [[ "${JOB_BASE_NAME}" == *-test1 ]]; then
       $SCRIPT_HELPERS_DIR/test_python_nn.bat "$DETERMINE_FROM" && \
       $SCRIPT_HELPERS_DIR/test_libtorch.bat
     elif [[ "${JOB_BASE_NAME}" == *-test2 ]]; then
       $SCRIPT_HELPERS_DIR/test_python_all_except_nn.bat "$DETERMINE_FROM" && \
+      $SCRIPT_HELPERS_DIR/test_custom_backend.bat && \
       $SCRIPT_HELPERS_DIR/test_custom_script_ops.bat
     fi
 fi

test/cpp/jit/test_backend.cpp (−1)

@@ -1,4 +1,3 @@
-#include <torch/csrc/jit/api/module.h>
 #include <torch/csrc/jit/backends/backend.h>
 
 namespace torch {

test/custom_backend/CMakeLists.txt (+13, new file)

@@ -0,0 +1,13 @@
+# Basic CMake setup
+cmake_minimum_required(VERSION 3.1 FATAL_ERROR)
+project(custom_backend)
+
+find_package(Torch REQUIRED)
+
+add_library(custom_backend SHARED custom_backend.cpp)
+set_property(TARGET custom_backend PROPERTY CXX_STANDARD 14)
+target_link_libraries(custom_backend "${TORCH_LIBRARIES}")
+
+add_executable(test_custom_backend test_custom_backend.cpp)
+set_property(TARGET test_custom_backend PROPERTY CXX_STANDARD 14)
+target_link_libraries(test_custom_backend custom_backend)

test/custom_backend/backend.py (+72, new file)

@@ -0,0 +1,72 @@
+import argparse
+import os.path
+import sys
+import torch
+
+
+def get_custom_backend_library_path():
+    """
+    Get the path to the library containing the custom backend.
+
+    Returns:
+        The path to the custom backend shared library, customized by platform.
+    """
+    if sys.platform.startswith("win32"):
+        library_filename = "custom_backend.dll"
+    elif sys.platform.startswith("darwin"):
+        library_filename = "libcustom_backend.dylib"
+    else:
+        library_filename = "libcustom_backend.so"
+    path = os.path.abspath("build/{}".format(library_filename))
+    assert os.path.exists(path), path
+    return path
+
+
+def to_custom_backend(module):
+    """
+    This is a helper that wraps torch._C._jit_to_backend and compiles
+    only the forward method with an empty compile spec.
+
+    Args:
+        module: input ScriptModule.
+
+    Returns:
+        The module, lowered so that it can run on the custom backend.
+    """
+    lowered_module = torch._C._jit_to_backend("custom_backend", module._c, {"forward": {"": ""}})
+    return lowered_module
+
+
+class Model(torch.nn.Module):
+    """
+    Simple model used for testing that the to_backend API supports saving,
+    loading, and executing in C++.
+    """
+
+    def __init__(self):
+        super(Model, self).__init__()
+
+    def forward(self, a, b):
+        return (a + b, a - b)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Lower a Module to a custom backend"
+    )
+    parser.add_argument("--export-module-to", required=True)
+    options = parser.parse_args()
+
+    # Load the library containing the custom backend.
+    library_path = get_custom_backend_library_path()
+    torch.ops.load_library(library_path)
+    assert library_path in torch.ops.loaded_libraries
+
+    # Lower an instance of Model to the custom backend and export it
+    # to the specified location.
+    lowered_module = to_custom_backend(torch.jit.script(Model()))
+    torch.jit.save(lowered_module, options.export_module_to)
+
+
+if __name__ == "__main__":
+    main()
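As a usage sketch (assuming the shared library has been built under build/ and a module exported to model.pt, as the CI scripts above do), the exported module can be reloaded Python-side. The backend library must be loaded first so that "custom_backend" is registered before deserialization:

import torch
from backend import get_custom_backend_library_path

# Register the custom backend before loading the lowered module.
torch.ops.load_library(get_custom_backend_library_path())
loaded = torch.jit.load("model.pt")

a, b = torch.rand(4), torch.rand(4)
out = loaded(a, b)
assert out[0].allclose(a + b) and out[1].allclose(a - b)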
test/custom_backend/custom_backend.cpp (+14, new file)

@@ -0,0 +1,14 @@
+#include "custom_backend.h"
+
+namespace torch {
+namespace custom_backend {
+namespace {
+constexpr auto kBackendName = "custom_backend";
+static auto cls = torch::jit::backend<CustomBackend>(kBackendName);
+}
+
+std::string getBackendName() {
+  return std::string(kBackendName);
+}
+}
+}

test/custom_backend/custom_backend.h (+84, new file)

@@ -0,0 +1,84 @@
+#include <torch/csrc/jit/backends/backend.h>
+
+namespace torch {
+namespace custom_backend {
+// This custom JIT backend is intended to do the minimal amount of work
+// necessary to test that the JIT backend registration endpoints and
+// code generation are working correctly. It is not intended to
+// produce numerically correct results.
+class CustomBackend : public torch::jit::PyTorchBackendInterface {
+ public:
+  // Constructor.
+  explicit CustomBackend() {}
+  virtual ~CustomBackend() = default;
+
+  c10::IValue preprocess(
+      c10::IValue mod,
+      c10::impl::GenericDict method_compile_spec) override {
+    return mod;
+  }
+
+  c10::impl::GenericDict compile(
+      c10::IValue processed,
+      c10::impl::GenericDict method_compile_spec) override {
+    auto spec =
+        c10::impl::toTypedDict<std::string, at::IValue>(method_compile_spec);
+
+    // Return the same string as a value for every key in method_compile_spec.
+    auto handles = c10::Dict<std::string, std::string>();
+    for (auto it = spec.begin(), end = spec.end(); it != end; ++it) {
+      handles.insert(it->key(), it->key());
+    }
+    return c10::impl::toGenericDict(handles);
+  }
+  c10::impl::GenericList execute(
+      c10::IValue handle,
+      c10::impl::GenericList inputs) override {
+    TORCH_INTERNAL_ASSERT(handle.isString());
+    TORCH_INTERNAL_ASSERT(inputs.size() > 0);
+
+    c10::List<at::Tensor> output_list;
+
+    // Implement simple accumulator and subtractive accumulator ops. Return
+    // one or both of them depending on the handle to make sure multiple
+    // outputs are handled.
+    c10::IValue value = inputs[0];
+    at::Tensor accum = value.toTensor();
+    accum = accum.clone();
+    at::Tensor sub_accum = value.toTensor();
+    sub_accum = sub_accum.clone();
+
+    for (size_t i = 1, e = inputs.size(); i < e; ++i) {
+      value = inputs[i];
+      accum.add_(value.toTensor(), 1.0);
+      sub_accum.sub_(value.toTensor(), 1.0);
+    }
+
+    if (handle.toStringRef() == "accum") {
+      output_list.emplace_back(accum);
+    } else if (handle.toStringRef() == "sub_accum") {
+      output_list.emplace_back(sub_accum);
+    } else if (handle.toStringRef() == "forward") {
+      output_list.emplace_back(accum);
+      output_list.emplace_back(sub_accum);
+    }
+
+    return c10::impl::toList(output_list);
+  }
+};
+
+// clang-format off
# if defined(_WIN32)
72+
# if defined(custom_ops_EXPORTS)
73+
# define CUSTOM_BACKEND_API __declspec(dllexport)
74+
# else
75+
# define CUSTOM_BACKEND_API __declspec(dllimport)
76+
# endif
77+
# else
78+
# define CUSTOM_BACKEND_API
79+
# endif
80+
// clang-format on
81+
82+
CUSTOM_BACKEND_API std::string getBackendName();
83+
} // namespace custom_backend
84+
} // namespace torch
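For readability, here is a small Python reference model of what execute computes; the real logic lives in the C++ above, and this sketch only mirrors it. Because compile maps every method name in the compile spec to itself, the handle passed to execute is the method name, and "forward" returns both the running sum and the running difference of the inputs:

import torch

def execute(handle, inputs):
    # Mirror of CustomBackend::execute: fold inputs into a running sum
    # and a running difference, then select outputs by handle.
    accum = inputs[0].clone()
    sub_accum = inputs[0].clone()
    for t in inputs[1:]:
        accum += t
        sub_accum -= t
    outputs = {"accum": [accum], "sub_accum": [sub_accum],
               "forward": [accum, sub_accum]}
    return outputs.get(handle, [])

print(execute("forward", [torch.ones(2), torch.ones(2)]))
# [tensor([2., 2.]), tensor([0., 0.])]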

test/custom_backend/test_custom_backend.cpp (+39, new file)

@@ -0,0 +1,39 @@
+#include <torch/cuda.h>
+#include <torch/script.h>
+
+#include <string>
+
+#include "custom_backend.h"
+
+// Load a module lowered for the custom backend from \p path and test that
+// it can be executed and produces correct results.
+void load_serialized_lowered_module_and_execute(const std::string& path) {
+  torch::jit::Module module = torch::jit::load(path);
+  // The custom backend is hardcoded to compute f(a, b) = (a + b, a - b).
+  auto tensor = torch::ones(5);
+  std::vector<torch::jit::IValue> inputs{tensor, tensor};
+  auto output = module.forward(inputs);
+  AT_ASSERT(output.isTuple());
+  auto output_elements = output.toTuple()->elements();
+  for (auto& e : output_elements) {
+    AT_ASSERT(e.isTensor());
+  }
+  AT_ASSERT(output_elements.size() == 2);
+  AT_ASSERT(output_elements[0].toTensor().allclose(tensor + tensor));
+  AT_ASSERT(output_elements[1].toTensor().allclose(tensor - tensor));
+}
+
+int main(int argc, const char* argv[]) {
+  if (argc != 2) {
+    std::cerr
+        << "usage: test_custom_backend <path-to-exported-script-module>\n";
+    return -1;
+  }
+  const std::string path_to_exported_script_module = argv[1];
+
+  std::cout << "Testing " << torch::custom_backend::getBackendName() << "\n";
+  load_serialized_lowered_module_and_execute(path_to_exported_script_module);
+
+  std::cout << "OK\n";
+  return 0;
+}
test/custom_backend/test_custom_backend.py (+54, new file)

@@ -0,0 +1,54 @@
+import os
+import tempfile
+import torch
+import unittest
+
+from backend import Model, to_custom_backend, get_custom_backend_library_path
+
+
+class TestCustomBackend(unittest.TestCase):
+    def setUp(self):
+        # Load the library containing the custom backend.
+        self.library_path = get_custom_backend_library_path()
+        torch.ops.load_library(self.library_path)
+        # Create an instance of the test Module and lower it for
+        # the custom backend.
+        self.model = to_custom_backend(torch.jit.script(Model()))
+
+    def test_execute(self):
+        """
+        Test execution using the custom backend.
+        """
+        a = torch.randn(4)
+        b = torch.randn(4)
+        # The custom backend is hardcoded to compute f(a, b) = (a + b, a - b).
+        expected = (a + b, a - b)
+        out = self.model(a, b)
+        self.assertTrue(expected[0].allclose(out[0]))
+        self.assertTrue(expected[1].allclose(out[1]))
+
+    def test_save_load(self):
+        """
+        Test that a lowered module can be executed correctly
+        after saving and loading.
+        """
+        # Test execution before saving and loading to make sure
+        # the lowered module works in the first place.
+        self.test_execute()
+
+        # Save and load.
+        f = tempfile.NamedTemporaryFile(delete=False)
+        try:
+            f.close()
+            torch.jit.save(self.model, f.name)
+            loaded = torch.jit.load(f.name)
+        finally:
+            os.unlink(f.name)
+        self.model = loaded
+
+        # Test execution again.
+        self.test_execute()
+
+
+if __name__ == "__main__":
+    unittest.main()
