
Commit 3a0e35c

ljk53 authored and facebook-github-bot committed on Aug 27, 2020
[pytorch] deprecate static dispatch (pytorch#43564)
Summary: Pull Request resolved: pytorch#43564

Static dispatch was originally introduced for mobile selective build. Since we have added selective build support for dynamic dispatch and tested it in FB production for months, we can deprecate static dispatch to reduce the complexity of the codebase.

Test Plan: Imported from OSS

Reviewed By: ezyang

Differential Revision: D23324452

Pulled By: ljk53

fbshipit-source-id: d2970257616a8c6337f90249076fca1ae93090c7
1 parent 3afd24d commit 3a0e35c
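
For context, the codegen diff below (aten/src/ATen/function_wrapper.py) shows the dispatch path that survives this cleanup: every generated Tensor method and namespace function now goes through the c10 dispatcher unconditionally. A minimal hand-written sketch of that pattern follows; the `aten::add.Tensor` schema and the exact typed signature are illustrative, not taken from this commit:

```cpp
// Sketch: the dynamic-dispatch call pattern the generated wrappers use.
// The schema handle is resolved once (function-local static), then every
// call is routed through the dispatcher. Selective build prunes operators
// by not registering them, so no per-wrapper #ifdef is needed anymore.
#include <ATen/ATen.h>
#include <ATen/core/dispatch/Dispatcher.h>

at::Tensor add_via_dispatcher(const at::Tensor& self, const at::Tensor& other,
                              at::Scalar alpha) {
  static auto op = c10::Dispatcher::singleton()
      .findSchemaOrThrow("aten::add", "Tensor")  // illustrative operator
      .typed<at::Tensor(const at::Tensor&, const at::Tensor&, at::Scalar)>();
  return op.call(self, other, alpha);
}
```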

File tree: 21 files changed, +8 -198 lines

.circleci/cimodel/data/simple/mobile_definitions.py (-5)

@@ -57,11 +57,6 @@ def gen_tree(self):
         [DOCKER_REQUIREMENT_ASAN],
         ["build"]
     ),
-    MobileJob(
-        DOCKER_IMAGE_ASAN,
-        [DOCKER_REQUIREMENT_ASAN],
-        ["custom", "build", "static"]
-    ),
 
     # Use LLVM-DEV toolchain in android-ndk-r19c docker image
     MobileJob(

.circleci/config.yml (-7)

@@ -6652,13 +6652,6 @@ workflows:
           name: pytorch_linux_xenial_py3_clang5_mobile_build
           requires:
             - docker-pytorch-linux-xenial-py3-clang5-asan
-      - pytorch_linux_build:
-          build_environment: pytorch-linux-xenial-py3-clang5-mobile-custom-build-static
-          build_only: "1"
-          docker_image: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan
-          name: pytorch_linux_xenial_py3_clang5_mobile_custom_build_static
-          requires:
-            - docker-pytorch-linux-xenial-py3-clang5-asan
       - pytorch_linux_build:
           build_environment: pytorch-linux-xenial-py3-clang5-mobile-custom-build-dynamic
           build_only: "1"

.jenkins/pytorch/build-mobile.sh (+1 -3)

@@ -22,9 +22,7 @@ retry pip install --pre torch torchvision \
 
 # Run end-to-end process of building mobile library, linking into the predictor
 # binary, and running forward pass with a real model.
-if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-static* ]]; then
-  TEST_CUSTOM_BUILD_STATIC=1 test/mobile/custom_build/build.sh
-elif [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
+if [[ "$BUILD_ENVIRONMENT" == *-mobile-custom-build-dynamic* ]]; then
   export LLVM_DIR="$(llvm-config-5.0 --prefix)"
   echo "LLVM_DIR: ${LLVM_DIR}"
   TEST_CUSTOM_BUILD_DYNAMIC=1 test/mobile/custom_build/build.sh

BUILD.bazel (-1)

@@ -34,7 +34,6 @@ header_template_rule(
     substitutions = {
         "cmakedefine": "define",
         "#define FEATURE_TORCH_MOBILE": "/* #undef FEATURE_TORCH_MOBILE */",
-        "#define USE_STATIC_DISPATCH": "/* #undef USE_STATIC_DISPATCH */",
         "#define C10_USE_NUMA": "/* #undef C10_USE_NUMA */",
     },
 )

CMakeLists.txt (-1)

@@ -124,7 +124,6 @@ option(BUILD_PYTHON "Build Python binaries" ON)
 option(BUILD_CAFFE2_OPS "Build Caffe2 operators" ON)
 option(BUILD_SHARED_LIBS "Build libcaffe2.so" ON)
 option(BUILD_CAFFE2_MOBILE "Build libcaffe2 for mobile (deprecating)" OFF)
-option(USE_STATIC_DISPATCH "Use static dispatch for ATen operators" OFF)
 cmake_dependent_option(
   CAFFE2_LINK_LOCAL_PROTOBUF "If set, build protobuf inside libcaffe2.so." ON
   "BUILD_SHARED_LIBS AND BUILD_CUSTOM_PROTOBUF" OFF)

aten/src/ATen/autocast_mode.cpp (-3)

@@ -202,8 +202,6 @@ Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const c10::op
         "safe to autocast.");
 }
 
-
-#ifndef USE_STATIC_DISPATCH
 namespace {
 /*****************************************************************************************************************
 This section performs load-time registration for autocast wrappers.

@@ -378,7 +376,6 @@ TORCH_LIBRARY_IMPL(aten, Autocast, m) {
 }
 
 }
-#endif
 
 } // namespace autocast
 } // namespace at
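
These TORCH_LIBRARY_IMPL blocks were previously compiled out under USE_STATIC_DISPATCH, where load-time registration was dead code; they are now unconditional. A self-contained sketch of the registration pattern being un-guarded (the wrapper body and the "sin" operator are placeholders, not code from this commit):

```cpp
// Sketch: load-time registration of an autocast wrapper, now built
// unconditionally. TORCH_LIBRARY_IMPL runs at library load time and
// registers the function for the Autocast dispatch key.
#include <ATen/ATen.h>
#include <torch/library.h>

namespace {

at::Tensor my_autocast_wrapper(const at::Tensor& self) {
  // A real wrapper would cast inputs per the autocast policy and redispatch;
  // returning the input unchanged keeps this placeholder self-contained.
  return self;
}

TORCH_LIBRARY_IMPL(aten, Autocast, m) {
  m.impl("sin", TORCH_FN(my_autocast_wrapper));  // "sin" is illustrative
}

} // namespace
```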

aten/src/ATen/cudnn/AutocastRNN.cpp (-2)

@@ -104,14 +104,12 @@ _cudnn_rnn_cast_reflatten(const Tensor & input,
 #endif // AT_CUDNN_ENABLED()
 }
 
-#ifndef USE_STATIC_DISPATCH
 namespace {
 TORCH_LIBRARY_IMPL(aten, Autocast, m) {
   m.impl("_cudnn_rnn",
          TORCH_FN((&at::autocast::_cudnn_rnn_cast_reflatten)));
 }
 } // anonymous namespace
-#endif
 
 } // namespace autocast
 } // namespace at

aten/src/ATen/function_wrapper.py (+3 -107)

@@ -146,14 +146,10 @@ def TypedDict(name, attrs, total=True):  # type: ignore
 
 // ${schema_string}
 ${return_type} Tensor::${api_name}(${method_formals}) const {
-#ifdef USE_STATIC_DISPATCH
-    ${static_dispatch_method_body}
-#else
     static auto op = c10::Dispatcher::singleton()
         .findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
         .typed<${tensor_method_cpp_signature}>();
     return op.call(${tensor_method_actuals});
-#endif
 }
 """)
 

@@ -172,45 +168,13 @@ def TypedDict(name, attrs, total=True):  # type: ignore
 
 // ${schema_string}
 ${return_type} ${api_name}(${formals}) {
-#ifdef USE_STATIC_DISPATCH
-    ${static_dispatch_function_body}
-#else
     static auto op = c10::Dispatcher::singleton()
         .findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
         .typed<${function_cpp_signature}>();
     return op.call(${function_actuals});
-#endif
-}
-""")
-
-# In order to rely on the linker to strip unused ops, it requires us to dispatch statically
-# in Functions.h and TensorMethods.cpp.
-#
-# NB: The default body also needs to apply a variable guard, as in some
-# situations what we think is a default body actually does have an
-# explicit derivative, and thereby would have gotten unwrapped by
-# the time you get to the implementation.
-STATIC_DISPATCH_FUNCTION_DEFAULT_BODY = CodeTemplate("""\
-at::AutoNonVariableTypeMode _var_guard(true);
-${return_call} TypeDefault::${type_wrapper_name}(${actuals});
-""")
-
-STATIC_DISPATCH_FUNCTION_SWITCH_BODY = CodeTemplate("""\
-at::AutoNonVariableTypeMode _var_guard(true);
-${dispatch_key_init}
-switch (dispatchKeyToBackend(${dispatch_key_var_name})) {
-    ${static_dispatch_function_cases}
-    default:
-        AT_ERROR("${api_name} not implemented for ", at::toString(${dispatch_key_var_name}));
 }
 """)
 
-STATIC_DISPATCH_FUNCTION_SWITCH_CASE = CodeTemplate("""\
-    case Backend::${backend}:
-        ${return_call} ${backend}Type::${type_wrapper_name}(${actuals});
-        break;
-""")
-
 IFDEF_BLOCK = CodeTemplate("""\
 #ifdef ${ifdef_guard}
 ${content}

@@ -246,10 +210,6 @@ def TypedDict(name, attrs, total=True):  # type: ignore
     ('ComplexDouble', 'ComplexDouble', 'ComplexDouble', False),
 ]
 
-static_dispatch_backends = ['CPU', 'QuantizedCPU', 'Vulkan']
-static_dispatch_backends_ifdef_guard = {'Vulkan' : 'USE_VULKAN'}
-
-
 class NYIError(Exception):
     """Indicates we don't support this declaration yet"""
 

@@ -1136,44 +1096,6 @@ def swizzle_self(f):  # blegh
 
     method_actuals = maybe_unwrap_optional_tensors(option, formals, option['method_actuals'])
 
-    if isinstance(type_method_dispatch, dict):
-        static_dispatch_function_cases = []
-        # NB: As this code is currently written, there will NEVER be
-        # a backend generated for variable dispatch. There is nothing
-        # stopping us from actually implementing this, however, if you
-        # really wanted variable on mobile, there's nothing stopping
-        # you from implementing this (however, you would have an
-        # annoying phase problem, since code generation for variable
-        # happens in tools/ which happens later than here.)
-        #
-        # If you pass in a variable to the dispatch, and variable is
-        # enabled, this switch will fail. This is intentional: you
-        # probably need to disable variable globally in the mobile
-        # calling code.
-        for backend in static_dispatch_backends:
-            if backend in type_method_dispatch:
-                static_dispatch_function_case = STATIC_DISPATCH_FUNCTION_SWITCH_CASE.substitute(
-                    option,
-                    backend=backend,
-                    backend_function=type_method_dispatch[backend],
-                    actuals=method_actuals)
-                if (backend in static_dispatch_backends_ifdef_guard):
-                    static_dispatch_function_cases.append(IFDEF_BLOCK.substitute(
-                        option,
-                        ifdef_guard=static_dispatch_backends_ifdef_guard[backend],
-                        content=static_dispatch_function_case))
-                else:
-                    static_dispatch_function_cases.append(static_dispatch_function_case)
-
-        static_dispatch_method_body = STATIC_DISPATCH_FUNCTION_SWITCH_BODY.substitute(
-            option,
-            dispatch_key_var_name=dispatch_key_var_name,
-            dispatch_key_init=dispatch_key_init,
-            static_dispatch_function_cases=static_dispatch_function_cases)
-    else:
-        static_dispatch_method_body = STATIC_DISPATCH_FUNCTION_DEFAULT_BODY.substitute(
-            option, actuals=method_actuals)
-
     # See NOTE[UnboxedOnly]
     if option['use_c10_dispatcher'] == 'full':
         tensor_method_actuals = option['schema_order_method_actuals']

@@ -1184,13 +1106,12 @@ def swizzle_self(f):  # blegh
         tensor_method_cpp_signature = option['cpp_signature']
 
     method_definition = TENSOR_METHOD_DEFINITION.substitute(
-        option, static_dispatch_method_body=static_dispatch_method_body,
+        option,
         tensor_method_actuals=tensor_method_actuals,
         tensor_method_cpp_signature=tensor_method_cpp_signature
     )
     return FunctionCode(
-        declaration=TENSOR_METHOD_DECLARATION.substitute(
-            option, static_dispatch_method_body=static_dispatch_method_body),
+        declaration=TENSOR_METHOD_DECLARATION.substitute(option),
         definition=method_definition)
 
 def gen_namespace_function(option, multidispatch_formals):

@@ -1204,31 +1125,6 @@ def gen_namespace_function(option, multidispatch_formals):
 
     actuals = maybe_unwrap_optional_tensors(option, formals, option['actuals'])
 
-    if isinstance(type_method_dispatch, dict):
-        static_dispatch_function_cases = []
-        for backend in static_dispatch_backends:
-            if backend in type_method_dispatch:
-                static_dispatch_function_case = STATIC_DISPATCH_FUNCTION_SWITCH_CASE.substitute(
-                    option,
-                    backend=backend,
-                    backend_function=type_method_dispatch[backend],
-                    actuals=actuals)
-                if (backend in static_dispatch_backends_ifdef_guard):
-                    static_dispatch_function_cases.append(IFDEF_BLOCK.substitute(
-                        option,
-                        ifdef_guard=static_dispatch_backends_ifdef_guard[backend],
-                        content=static_dispatch_function_case))
-                else:
-                    static_dispatch_function_cases.append(static_dispatch_function_case)
-        static_dispatch_function_body = STATIC_DISPATCH_FUNCTION_SWITCH_BODY.substitute(
-            option,
-            dispatch_key_var_name=dispatch_key_var_name,
-            dispatch_key_init=dispatch_key_init,
-            static_dispatch_function_cases=static_dispatch_function_cases)
-    else:
-        static_dispatch_function_body = STATIC_DISPATCH_FUNCTION_DEFAULT_BODY.substitute(
-            option, actuals=actuals)
-
     # See NOTE[UnboxedOnly]
     if option['use_c10_dispatcher'] == 'full':
         function_actuals = option['schema_order_actuals']

@@ -1239,7 +1135,7 @@ def gen_namespace_function(option, multidispatch_formals):
         function_cpp_signature = option['cpp_signature']
 
     fn_definition = FUNCTION_DEFINITION.substitute(
-        option, static_dispatch_function_body=static_dispatch_function_body,
+        option,
         function_actuals=function_actuals,
         function_cpp_signature=function_cpp_signature)
 
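
For comparison, here is a hedged reconstruction of what the deleted STATIC_DISPATCH_FUNCTION_SWITCH_BODY / SWITCH_CASE templates expanded to, with `add` standing in for a concrete operator; the dispatch-key computation is paraphrased since ${dispatch_key_init} is not shown in this diff:

```cpp
// Approximate expansion of the removed static-dispatch templates. The call
// compiles down to a direct backend switch, so the linker can drop backend
// functions that no generated wrapper references: the old basis for mobile
// selective build, now superseded by the op dependency graph.
at::Tensor add_static_dispatch(const at::Tensor& self, const at::Tensor& other,
                               at::Scalar alpha) {
  at::AutoNonVariableTypeMode _var_guard(true);
  // ${dispatch_key_init} expanded here; assumed roughly equivalent to:
  auto dispatch_key = c10::legacyExtractDispatchKey(self.key_set());
  switch (dispatchKeyToBackend(dispatch_key)) {
    case Backend::CPU:
      return CPUType::add(self, other, alpha);
    case Backend::QuantizedCPU:
      return QuantizedCPUType::add(self, other, alpha);
#ifdef USE_VULKAN
    case Backend::Vulkan:
      return VulkanType::add(self, other, alpha);
#endif
    default:
      AT_ERROR("add not implemented for ", at::toString(dispatch_key));
  }
}
```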

aten/src/ATen/native/TensorProperties.cpp (-6)

@@ -53,20 +53,14 @@ bool cudnn_is_acceptable(const Tensor& self) {
 }
 
 Tensor detach(const Tensor& self) {
-#ifndef USE_STATIC_DISPATCH
   // this just exists to give us a hook in VariableType and an entry in Declarations.yaml
   //AT_ERROR("detach is not implemented for Tensor");
-#endif
-  // this is no-op for USE_STATIC_DISPATCH mode
   return self;
 }
 
 Tensor & detach_(Tensor & self) {
-#ifndef USE_STATIC_DISPATCH
   // this just exists to give us a hook in VariableType and an entry in Declarations.yaml
   //AT_ERROR("detach_ is not implemented for Tensor");
-#endif
-  // this is no-op for USE_STATIC_DISPATCH mode
   return self;
 }
 

aten/src/ATen/templates/TensorMethods.cpp (-9)

@@ -12,15 +12,6 @@
 #include <ATen/quantized/Quantizer.h>
 #include <torch/csrc/WindowsTorchApiMacro.h>
 
-#ifdef USE_STATIC_DISPATCH
-#include <ATen/TypeDefault.h>
-#include <ATen/CPUType.h>
-#include <ATen/QuantizedCPUType.h>
-#ifdef USE_VULKAN
-#include <ATen/VulkanType.h>
-#endif
-#endif
-
 namespace at {
 
 Tensor Tensor::cpu() const {

c10/macros/cmake_macros.h.in (-5)

@@ -14,9 +14,4 @@
 // to converging libtorch and caffe2 mobile builds and removing it eventually.
 #cmakedefine FEATURE_TORCH_MOBILE
 
-// If defined it will use static dispatch for ATen operators.
-// Should expose this macro for projects including ATen headers to inherient
-// the same option.
-#cmakedefine USE_STATIC_DISPATCH
-
 #endif // C10_MACROS_CMAKE_MACROS_H_

caffe2/core/macros.h.in (-1)

@@ -79,5 +79,4 @@ static_assert(
   {"USE_MKLDNN", "${CAFFE2_USE_MKLDNN}"}, \
   {"USE_NVTX", "${CAFFE2_USE_NVTX}"}, \
   {"USE_TRT", "${CAFFE2_USE_TRT}"}, \
-  {"USE_STATIC_DISPATCH", "${USE_STATIC_DISPATCH}"}, \
 }

cmake/Codegen.cmake (+1 -1)

@@ -167,7 +167,7 @@ if(INTERN_BUILD_ATEN_OPS)
   endif()
 
   if(SELECTED_OP_LIST)
-    if(NOT USE_STATIC_DISPATCH AND NOT OP_DEPENDENCY)
+    if(NOT OP_DEPENDENCY)
       message(INFO "Use default op dependency graph .yaml file for custom build with dynamic dispatch.")
       set(OP_DEPENDENCY ${CMAKE_CURRENT_LIST_DIR}/../tools/code_analyzer/default_op_deps.yaml)
     endif()

cmake/Summary.cmake (-1)

@@ -21,7 +21,6 @@ function(caffe2_print_configuration_summary)
   message(STATUS "  TORCH_VERSION         : ${TORCH_VERSION}")
   message(STATUS "  CAFFE2_VERSION        : ${CAFFE2_VERSION}")
   message(STATUS "  BUILD_CAFFE2_MOBILE   : ${BUILD_CAFFE2_MOBILE}")
-  message(STATUS "  USE_STATIC_DISPATCH   : ${USE_STATIC_DISPATCH}")
   message(STATUS "  BUILD_BINARY          : ${BUILD_BINARY}")
   message(STATUS "  BUILD_CUSTOM_PROTOBUF : ${BUILD_CUSTOM_PROTOBUF}")
   if(${CAFFE2_LINK_LOCAL_PROTOBUF})

scripts/build_android.sh (-1)

@@ -61,7 +61,6 @@ CMAKE_ARGS=()
 
 if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
   # Build PyTorch mobile
-  CMAKE_ARGS+=("-DUSE_STATIC_DISPATCH=ON")
   CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$($PYTHON -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
   CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$($PYTHON -c 'import sys; print(sys.executable)')")
   CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")

scripts/build_ios.sh (+1 -2)

@@ -13,7 +13,6 @@ CMAKE_ARGS=()
 
 if [ -z "${BUILD_CAFFE2_MOBILE:-}" ]; then
  # Build PyTorch mobile
-  CMAKE_ARGS+=("-DUSE_STATIC_DISPATCH=ON")
   CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
   CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
   CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")

@@ -62,7 +61,7 @@ fi
 
 # IOS_PLATFORM controls type of iOS platform (see ios-cmake)
 if [ -n "${IOS_PLATFORM:-}" ]; then
-    CMAKE_ARGS+=("-DIOS_PLATFORM=${IOS_PLATFORM}")
+  CMAKE_ARGS+=("-DIOS_PLATFORM=${IOS_PLATFORM}")
   if [ "${IOS_PLATFORM}" == "WATCHOS" ]; then
     # enable bitcode by default for watchos
     CMAKE_ARGS+=("-DCMAKE_C_FLAGS=-fembed-bitcode")

(The second hunk is a whitespace-only indentation fix; the command itself is unchanged.)

scripts/build_mobile.sh (-1)

@@ -15,7 +15,6 @@ echo "Bash: $(/bin/bash --version | head -1)"
 echo "Caffe2 path: $CAFFE2_ROOT"
 
 CMAKE_ARGS=()
-CMAKE_ARGS+=("-DUSE_STATIC_DISPATCH=ON")
 CMAKE_ARGS+=("-DCMAKE_PREFIX_PATH=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')")
 CMAKE_ARGS+=("-DPYTHON_EXECUTABLE=$(python -c 'import sys; print(sys.executable)')")
 CMAKE_ARGS+=("-DBUILD_CUSTOM_PROTOBUF=OFF")

test/mobile/custom_build/build.sh (+2 -30)

@@ -4,23 +4,18 @@
 # size for mobile devices and the flow to integrate it with a simple predictor
 # in c++.
 #
-# There are three custom build types:
+# Supported custom build types:
 #
 # 1. `TEST_DEFAULT_BUILD=1 ./build.sh` - it is similar to the prebuilt libtorch
 # libraries released for Android and iOS (same CMake build options + host
 # toolchain), which doesn't contain autograd function nor backward ops thus is
 # smaller than full LibTorch.
 #
-# 2. `TEST_CUSTOM_BUILD_STATIC=1 ./build.sh` - it further optimizes libtorch
+# 2. `TEST_CUSTOM_BUILD_DYNAMIC=1 ./build.sh` - it further optimizes libtorch
 # size by only including ops used by a specific model.
-#
-# 3. `TEST_CUSTOM_BUILD_DYNAMIC=1 ./build.sh` - similar as 2) except that it
-# relies on the op dependency graph (instead of static dispatch) to calculate
-# and keep all transitively dependent ops by the model.
 # Note that LLVM_DIR environment variable should be set to the location of
 # LLVM-dev toolchain.
 #
-# Type 2) will be deprecated by type 3) in the future.
 ###############################################################################
 
 set -ex -o pipefail

@@ -59,25 +54,13 @@ run_default_build() {
     "${SRC_ROOT}/scripts/build_mobile.sh"
 }
 
-run_custom_build_with_static_dispatch() {
-  LIBTORCH_BUILD_ROOT="${BUILD_ROOT}/build_custom_libtorch_static"
-  LIBTORCH_INSTALL_PREFIX="${LIBTORCH_BUILD_ROOT}/install"
-
-  BUILD_ROOT="${LIBTORCH_BUILD_ROOT}" \
-    "${SRC_ROOT}/scripts/build_mobile.sh" \
-    -DCMAKE_CXX_FLAGS="-DSTRIP_ERROR_MESSAGES" \
-    -DUSE_STATIC_DISPATCH=ON \
-    -DSELECTED_OP_LIST="${ROOT_OPS}"
-}
-
 run_custom_build_with_dynamic_dispatch() {
   LIBTORCH_BUILD_ROOT="${BUILD_ROOT}/build_custom_libtorch_dynamic"
   LIBTORCH_INSTALL_PREFIX="${LIBTORCH_BUILD_ROOT}/install"
 
   BUILD_ROOT="${LIBTORCH_BUILD_ROOT}" \
     "${SRC_ROOT}/scripts/build_mobile.sh" \
     -DCMAKE_CXX_FLAGS="-DSTRIP_ERROR_MESSAGES" \
-    -DUSE_STATIC_DISPATCH=OFF \
     -DSELECTED_OP_LIST="${ROOT_OPS}" \
     -DOP_DEPENDENCY="${OP_DEPENDENCY}"
 }

@@ -115,13 +98,6 @@ test_default_build() {
   run_predictor
 }
 
-test_custom_build_with_static_dispatch() {
-  prepare_model_and_dump_root_ops
-  run_custom_build_with_static_dispatch
-  build_predictor
-  run_predictor
-}
-
 test_custom_build_with_dynamic_dispatch() {
   prepare_model_and_dump_root_ops
   generate_op_dependency_graph

@@ -134,10 +110,6 @@ if [ -n "${TEST_DEFAULT_BUILD}" ]; then
   test_default_build
 fi
 
-if [ -n "${TEST_CUSTOM_BUILD_STATIC}" ]; then
-  test_custom_build_with_static_dispatch
-fi
-
 if [ -n "${TEST_CUSTOM_BUILD_DYNAMIC}" ]; then
   test_custom_build_with_dynamic_dispatch
 fi
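
The predictor binary this flow links against is essentially a minimal TorchScript runner. A hedged sketch of such a runner follows (model path and input shape are illustrative; the actual test/mobile/custom_build predictor source may differ in detail):

```cpp
// Sketch: load a scripted model and run one forward pass, mirroring the
// end-to-end custom-build test flow.
#include <torch/script.h>

#include <iostream>
#include <vector>

int main() {
  // Mobile custom builds exclude autograd, so run with variable type
  // handling disabled (the same guard appears elsewhere in this commit).
  at::AutoNonVariableTypeMode non_var_type_mode(true);

  torch::jit::Module module = torch::jit::load("model.pt");  // illustrative path
  std::vector<torch::jit::IValue> inputs;
  inputs.emplace_back(torch::ones({1, 3, 224, 224}));        // illustrative shape
  at::Tensor output = module.forward(inputs).toTensor();
  std::cout << output.sizes() << std::endl;
  return 0;
}
```

With dynamic dispatch, the same binary works whether libtorch was built full-size or pruned to a model's ops, e.g. via `TEST_CUSTOM_BUILD_DYNAMIC=1 ./build.sh` with `LLVM_DIR` set as noted above.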

tools/code_analyzer/build.sh (-1)

@@ -62,7 +62,6 @@ build_torch_mobile() {
 
   BUILD_ROOT="${TORCH_BUILD_ROOT}" "${SRC_ROOT}/scripts/build_mobile.sh" \
     -DCMAKE_CXX_FLAGS="-S -emit-llvm -DSTRIP_ERROR_MESSAGES" \
-    -DUSE_STATIC_DISPATCH=OFF \
     ${MOBILE_BUILD_FLAGS}
 }
 

tools/jit/gen_unboxing_wrappers.py (-4)

@@ -166,11 +166,7 @@ def from_ivalue(arg, value):
         .layout(${layout})
         .device(${device})
         .pinned_memory(${pin_memory});
-#ifdef USE_STATIC_DISPATCH
-auto result_ = at::${name}(${args_with_tensor_options});
-#else
 auto result_ = torch::${name}(${args_with_tensor_options});
-#endif
 """)
 CALL_METHOD_WITH_TENSOR_OPTIONS = CodeTemplate("""\
 const auto options = TensorOptions()

torch/csrc/jit/runtime/register_c10_ops.cpp (-7)

@@ -122,14 +122,7 @@ Operator createOperatorFromC10_withTracingHandledHere(
       jit::tracer::setTracingState(nullptr);
     }
 
-#ifdef USE_STATIC_DISPATCH
-    {
-      at::AutoNonVariableTypeMode non_var_type_mode(true);
-      op.callBoxed(stack);
-    }
-#else
     op.callBoxed(stack);
-#endif // USE_STATIC_DISPATCH
 
     if (tracer_state) {
       jit::tracer::setTracingState(std::move(tracer_state));
jit::tracer::setTracingState(std::move(tracer_state));
