@@ -782,6 +782,12 @@ def convert_arg_line_to_args(self, arg_line):
     parser.add_argument("--use_triton_kernel", action="store_true", help="Use triton compiled kernels")
     parser.add_argument("--use_lock_free_queue", action="store_true", help="Use lock-free task queue for threadpool.")
 
+    parser.add_argument(
+        "--enable_generic_interface",
+        action="store_true",
+        help="Build the ORT shared library and compatible bridge with primary EPs (TensorRT, OpenVINO, QNN, VitisAI) but not tests.",
+    )
+
     if not is_windows():
         parser.add_argument(
             "--allow_running_as_root",
@@ -1042,6 +1048,12 @@ def generate_build_tree(
         "-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
         "-Donnxruntime_USE_TENSORRT_BUILTIN_PARSER="
         + ("ON" if args.use_tensorrt_builtin_parser and not args.use_tensorrt_oss_parser else "OFF"),
+        # interface variables are used only for building onnxruntime/onnxruntime_shared.dll but not EPs
+        "-Donnxruntime_USE_TENSORRT_INTERFACE=" + ("ON" if args.enable_generic_interface else "OFF"),
+        "-Donnxruntime_USE_CUDA_INTERFACE=" + ("ON" if args.enable_generic_interface else "OFF"),
+        "-Donnxruntime_USE_OPENVINO_INTERFACE=" + ("ON" if args.enable_generic_interface else "OFF"),
+        "-Donnxruntime_USE_VITISAI_INTERFACE=" + ("ON" if args.enable_generic_interface else "OFF"),
+        "-Donnxruntime_USE_QNN_INTERFACE=" + ("ON" if args.enable_generic_interface else "OFF"),
         # set vars for migraphx
         "-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
         "-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
@@ -1372,6 +1384,8 @@ def generate_build_tree(
             cmake_args += ["-Donnxruntime_BUILD_QNN_EP_STATIC_LIB=ON"]
         if args.android and args.use_qnn != "static_lib":
             raise BuildError("Only support Android + QNN builds with QNN EP built as a static library.")
+        if args.use_qnn == "static_lib" and args.enable_generic_interface:
+            raise BuildError("Generic ORT interface only supported with QNN EP built as a shared library.")
 
     if args.use_coreml:
         cmake_args += ["-Donnxruntime_USE_COREML=ON"]
@@ -1529,6 +1543,12 @@ def generate_build_tree(
             "-Donnxruntime_USE_FULL_PROTOBUF=ON",
         ]
 
+    # When this flag is enabled, we only build the ONNXRuntime shared library, expecting a compatible EP
+    # shared library to be built in a separate process. Skip the tests for now, since the ONNXRuntime
+    # shared library built under this flag is not expected to work on its own.
+    if args.enable_generic_interface:
+        cmake_args += ["-Donnxruntime_BUILD_UNIT_TESTS=OFF"]
+
     if args.enable_lazy_tensor:
         import torch
 
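A brief sketch of how this hunk and the later change in main() work together to suppress testing when the flag is set; the `Args` class below is a simplified stand-in for the parsed arguments:

```python
# Simplified stand-in for the parsed arguments.
class Args:
    enable_generic_interface = True
    test = True

args = Args()
cmake_args = []

# generate_build_tree(): drop the unit-test targets from the CMake configuration.
if args.enable_generic_interface:
    cmake_args += ["-Donnxruntime_BUILD_UNIT_TESTS=OFF"]

# main() (later hunk): skip the test phase entirely.
if args.enable_generic_interface:
    args.test = False

print(cmake_args, args.test)  # ['-Donnxruntime_BUILD_UNIT_TESTS=OFF'] False
```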
@@ -2649,6 +2669,9 @@ def main():
         # Disable ONNX Runtime's builtin memory checker
         args.disable_memleak_checker = True
 
+    if args.enable_generic_interface:
+        args.test = False
+
     # If there was no explicit argument saying what to do, default
     # to update, build and test (for native builds).
     if not (args.update or args.clean or args.build or args.test or args.gen_doc):
@@ -2752,7 +2775,10 @@ def main():
     source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
 
     # if using cuda, setup cuda paths and env vars
-    cuda_home, cudnn_home = setup_cuda_vars(args)
+    cuda_home = ""
+    cudnn_home = ""
+    if args.use_cuda:
+        cuda_home, cudnn_home = setup_cuda_vars(args)
 
     mpi_home = args.mpi_home
     nccl_home = args.nccl_home
@@ -2765,10 +2791,14 @@ def main():
     armnn_home = args.armnn_home
     armnn_libs = args.armnn_libs
 
-    qnn_home = args.qnn_home
+    qnn_home = ""
+    if args.use_qnn:
+        qnn_home = args.qnn_home
 
     # if using tensorrt, setup tensorrt paths
-    tensorrt_home = setup_tensorrt_vars(args)
+    tensorrt_home = ""
+    if args.use_tensorrt:
+        tensorrt_home = setup_tensorrt_vars(args)
 
     # if using migraphx, setup migraphx paths
     migraphx_home = setup_migraphx_vars(args)
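A self-contained sketch of the guard pattern these two hunks introduce: EP-specific path setup only runs when the corresponding EP is requested, so a build that only enables the generic interface no longer needs CUDA, QNN, or TensorRT configured. The stub helpers and paths below are placeholders, not build.py's real implementations:

```python
# Stub stand-ins for build.py's setup helpers; paths are placeholders.
def setup_cuda_vars(args):
    return "/usr/local/cuda", "/usr/local/cudnn"

def setup_tensorrt_vars(args):
    return "/opt/tensorrt"

# Simplified stand-in for the parsed arguments.
class Args:
    use_cuda = False
    use_qnn = False
    use_tensorrt = False
    qnn_home = "/opt/qnn"

args = Args()

cuda_home, cudnn_home = "", ""
if args.use_cuda:
    cuda_home, cudnn_home = setup_cuda_vars(args)

qnn_home = args.qnn_home if args.use_qnn else ""
tensorrt_home = setup_tensorrt_vars(args) if args.use_tensorrt else ""

# With no EP enabled, all homes stay empty instead of triggering setup errors.
print(repr(cuda_home), repr(cudnn_home), repr(qnn_home), repr(tensorrt_home))
```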
@@ -2853,9 +2883,9 @@ def main():
             toolset = "host=" + host_arch + ",version=" + args.msvc_toolset
         else:
             toolset = "host=" + host_arch
-        if args.cuda_version:
+        if args.use_cuda and args.cuda_version:
             toolset += ",cuda=" + args.cuda_version
-        elif args.cuda_home:
+        elif args.use_cuda and args.cuda_home:
             toolset += ",cuda=" + args.cuda_home
         if args.windows_sdk_version:
             target_arch += ",version=" + args.windows_sdk_version
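Finally, a small sketch of the toolset change in the last hunk: the CUDA component is appended to the Visual Studio toolset string only when `--use_cuda` is set, even if a CUDA version or home happens to be configured. The values here are illustrative:

```python
# Simplified stand-in for the parsed arguments; values are illustrative.
class Args:
    use_cuda = False
    cuda_version = "12.4"
    cuda_home = "C:/cuda"
    msvc_toolset = None
    windows_sdk_version = None

args = Args()
host_arch = "x64"
target_arch = "x64"

toolset = "host=" + host_arch
if args.msvc_toolset:
    toolset = "host=" + host_arch + ",version=" + args.msvc_toolset
if args.use_cuda and args.cuda_version:
    toolset += ",cuda=" + args.cuda_version
elif args.use_cuda and args.cuda_home:
    toolset += ",cuda=" + args.cuda_home
if args.windows_sdk_version:
    target_arch += ",version=" + args.windows_sdk_version

print(toolset)  # "host=x64" -- no CUDA component while use_cuda is False
```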