From 7f52f0365ba797baf69c5913bbbaa127d93f49e0 Mon Sep 17 00:00:00 2001 From: lin-hitonami Date: Fri, 8 Jul 2022 11:33:09 +0800 Subject: [PATCH 1/4] [llvm] [refactor] (Decomp of #5251 6/n) Let ModuleToFunctionConverter support multiple modules --- taichi/codegen/codegen.cpp | 14 +++++ taichi/codegen/codegen.h | 23 +++++++ taichi/codegen/cpu/codegen_cpu.cpp | 57 +++++++++++++++++ taichi/codegen/cpu/codegen_cpu.h | 20 +++++- taichi/codegen/cuda/codegen_cuda.cpp | 21 +++---- taichi/codegen/cuda/codegen_cuda.h | 12 ++-- taichi/codegen/llvm/codegen_llvm.cpp | 62 ------------------- taichi/codegen/llvm/codegen_llvm.h | 33 +++------- taichi/runtime/cpu/aot_module_loader_impl.cpp | 10 +-- .../runtime/cuda/aot_module_loader_impl.cpp | 5 +- 10 files changed, 144 insertions(+), 113 deletions(-) diff --git a/taichi/codegen/codegen.cpp b/taichi/codegen/codegen.cpp index 7a3370e97226a..bf051e5894f40 100644 --- a/taichi/codegen/codegen.cpp +++ b/taichi/codegen/codegen.cpp @@ -51,5 +51,19 @@ std::unique_ptr KernelCodeGen::create(Arch arch, TI_ERROR("Llvm disabled"); #endif } +#ifdef TI_WITH_LLVM + +ModuleToFunctionConverter::ModuleToFunctionConverter( + TaichiLLVMContext *tlctx, + LlvmRuntimeExecutor *executor) + : tlctx_(tlctx), executor_(executor) { +} +FunctionType ModuleToFunctionConverter::convert( + const Kernel *kernel, + std::vector &&data) const { + return convert(kernel->name, infer_launch_args(kernel), std::move(data)); +} + +#endif TLANG_NAMESPACE_END diff --git a/taichi/codegen/codegen.h b/taichi/codegen/codegen.h index 607a0222fad48..bda8e0a203c5c 100644 --- a/taichi/codegen/codegen.h +++ b/taichi/codegen/codegen.h @@ -1,6 +1,7 @@ // Driver class for kernel code generators. #pragma once +#include #include "taichi/ir/ir.h" #include "taichi/program/program.h" #ifdef TI_WITH_LLVM @@ -36,4 +37,26 @@ class KernelCodeGen { #endif }; +#ifdef TI_WITH_LLVM + +class ModuleToFunctionConverter { + public: + explicit ModuleToFunctionConverter(TaichiLLVMContext *tlctx, + LlvmRuntimeExecutor *program); + + virtual ~ModuleToFunctionConverter() = default; + + virtual FunctionType convert(const std::string &kernel_name, + const std::vector &args, + std::vector &&data) const = 0; + + virtual FunctionType convert(const Kernel *kernel, + std::vector &&data) const; + + protected: + TaichiLLVMContext *tlctx_{nullptr}; + LlvmRuntimeExecutor *executor_{nullptr}; +}; + +#endif TLANG_NAMESPACE_END diff --git a/taichi/codegen/cpu/codegen_cpu.cpp b/taichi/codegen/cpu/codegen_cpu.cpp index 79a62a6b093aa..085de99ed59f9 100644 --- a/taichi/codegen/cpu/codegen_cpu.cpp +++ b/taichi/codegen/cpu/codegen_cpu.cpp @@ -209,6 +209,18 @@ class CodeGenLLVMCPU : public CodeGenLLVM { TI_NOT_IMPLEMENTED } } + + FunctionType gen() override { + auto compiled_res = run_compilation(); + + CPUModuleToFunctionConverter converter{ + tlctx, get_llvm_program(prog)->get_runtime_executor()}; + std::vector data; + data.push_back(std::move(compiled_res)); + return converter.convert(kernel, std::move(data)); + } + + }; } // namespace @@ -219,6 +231,51 @@ std::unique_ptr CodeGenCPU::make_codegen_llvm(Kernel *kernel, IRNode *ir) { return std::make_unique(kernel, ir); } + +FunctionType CPUModuleToFunctionConverter::convert( + const std::string &kernel_name, + const std::vector &args, + std::vector &&data) const { + for (auto &datum : data) { + tlctx_->add_module(std::move(datum.module)); + } + + using TaskFunc = int32 (*)(void *); + std::vector task_funcs; + task_funcs.reserve(data.size()); + for (auto &datum : data) { + for (auto &task : datum.tasks) { 
+ auto *func_ptr = tlctx_->lookup_function_pointer(task.name); + TI_ASSERT_INFO(func_ptr, "Offloaded datum function {} not found", + task.name); + task_funcs.push_back((TaskFunc)(func_ptr)); + } + } + // Do NOT capture `this`... + return [executor = this->executor_, args, kernel_name, + task_funcs](RuntimeContext &context) { + TI_TRACE("Launching kernel {}", kernel_name); + // For taichi ndarrays, context.args saves pointer to its + // |DeviceAllocation|, CPU backend actually want to use the raw ptr here. + for (int i = 0; i < (int)args.size(); i++) { + if (args[i].is_array && + context.device_allocation_type[i] != + RuntimeContext::DevAllocType::kNone && + context.array_runtime_sizes[i] > 0) { + DeviceAllocation *ptr = + static_cast(context.get_arg(i)); + uint64 host_ptr = (uint64)executor->get_ndarray_alloc_info_ptr(*ptr); + context.set_arg(i, host_ptr); + context.set_array_device_allocation_type( + i, RuntimeContext::DevAllocType::kNone); + } + } + for (auto task : task_funcs) { + task(&context); + } + }; +} + #endif // TI_WITH_LLVM FunctionType CodeGenCPU::codegen() { diff --git a/taichi/codegen/cpu/codegen_cpu.h b/taichi/codegen/cpu/codegen_cpu.h index 0994b176bc2fb..ab3e81b0ea0e4 100644 --- a/taichi/codegen/cpu/codegen_cpu.h +++ b/taichi/codegen/cpu/codegen_cpu.h @@ -10,7 +10,7 @@ TLANG_NAMESPACE_BEGIN class CodeGenCPU : public KernelCodeGen { - public: +public: CodeGenCPU(Kernel *kernel, IRNode *ir = nullptr) : KernelCodeGen(kernel, ir) { } @@ -23,4 +23,22 @@ class CodeGenCPU : public KernelCodeGen { FunctionType codegen() override; }; +#ifdef TI_WITH_LLVM + +class CPUModuleToFunctionConverter : public ModuleToFunctionConverter { + public: + explicit CPUModuleToFunctionConverter(TaichiLLVMContext *tlctx, + LlvmRuntimeExecutor *executor) + : ModuleToFunctionConverter(tlctx, executor) { + } + + using ModuleToFunctionConverter::convert; + + FunctionType convert(const std::string &kernel_name, + const std::vector &args, + std::vector &&data) const override; +}; + +#endif + TLANG_NAMESPACE_END diff --git a/taichi/codegen/cuda/codegen_cuda.cpp b/taichi/codegen/cuda/codegen_cuda.cpp index 063679a7068be..3ef4c2b20d046 100644 --- a/taichi/codegen/cuda/codegen_cuda.cpp +++ b/taichi/codegen/cuda/codegen_cuda.cpp @@ -41,9 +41,9 @@ class CodeGenLLVMCUDA : public CodeGenLLVM { auto *llvm_prog = get_llvm_program(kernel->program); CUDAModuleToFunctionConverter converter{tlctx, llvm_prog->get_runtime_executor()}; - - return converter.convert(this->kernel, std::move(compiled_res.module), - std::move(compiled_res.tasks)); + std::vector data; + data.push_back(std::move(compiled_res)); + return converter.convert(this->kernel, std::move(data)); } llvm::Value *create_print(std::string tag, @@ -738,11 +738,14 @@ FunctionType CodeGenCUDA::codegen() { return CodeGenLLVMCUDA(kernel, ir).gen(); } +#ifdef TI_WITH_LLVM + FunctionType CUDAModuleToFunctionConverter::convert( const std::string &kernel_name, const std::vector &args, - std::unique_ptr mod, - std::vector &&tasks) const { + std::vector &&data) const { + auto &mod = data[0].module; + auto &tasks = data[0].tasks; #ifdef TI_WITH_CUDA for (const auto &task : tasks) { llvm::Function *func = mod->getFunction(task.name); @@ -847,12 +850,6 @@ FunctionType CUDAModuleToFunctionConverter::convert( #endif // TI_WITH_CUDA } -FunctionType CUDAModuleToFunctionConverter::convert( - const Kernel *kernel, - std::unique_ptr mod, - std::vector &&tasks) const { - return convert(kernel->name, infer_launch_args(kernel), std::move(mod), - std::move(tasks)); -} +#endif 
TLANG_NAMESPACE_END diff --git a/taichi/codegen/cuda/codegen_cuda.h b/taichi/codegen/cuda/codegen_cuda.h index 7c11f17d27d34..a252e7e6729d3 100644 --- a/taichi/codegen/cuda/codegen_cuda.h +++ b/taichi/codegen/cuda/codegen_cuda.h @@ -22,21 +22,21 @@ class CodeGenCUDA : public KernelCodeGen { FunctionType codegen() override; }; +#ifdef TI_WITH_LLVM + class CUDAModuleToFunctionConverter : public ModuleToFunctionConverter { public: explicit CUDAModuleToFunctionConverter(TaichiLLVMContext *tlctx, LlvmRuntimeExecutor *executor) : ModuleToFunctionConverter(tlctx, executor) { } + using ModuleToFunctionConverter::convert; FunctionType convert(const std::string &kernel_name, const std::vector &args, - std::unique_ptr mod, - std::vector &&tasks) const override; - - FunctionType convert(const Kernel *kernel, - std::unique_ptr mod, - std::vector &&tasks) const override; + std::vector &&data) const override; }; +#endif + TLANG_NAMESPACE_END diff --git a/taichi/codegen/llvm/codegen_llvm.cpp b/taichi/codegen/llvm/codegen_llvm.cpp index 8ddc91261ee75..9955853f41ff1 100644 --- a/taichi/codegen/llvm/codegen_llvm.cpp +++ b/taichi/codegen/llvm/codegen_llvm.cpp @@ -2374,14 +2374,6 @@ bool CodeGenLLVM::maybe_read_compilation_from_cache( return true; } -FunctionType CodeGenLLVM::gen() { - auto compiled_res = run_compilation(); - - ModuleToFunctionConverter converter{ - tlctx, get_llvm_program(prog)->get_runtime_executor()}; - return converter.convert(kernel, std::move(compiled_res.module), - std::move(compiled_res.tasks)); -} llvm::Value *CodeGenLLVM::create_xlogue(std::unique_ptr &block) { llvm::Value *xlogue; @@ -2457,60 +2449,6 @@ void CodeGenLLVM::cache_module(const std::string &kernel_key) { std::move(offloaded_task_list)); } -ModuleToFunctionConverter::ModuleToFunctionConverter( - TaichiLLVMContext *tlctx, - LlvmRuntimeExecutor *executor) - : tlctx_(tlctx), executor_(executor) { -} - -FunctionType ModuleToFunctionConverter::convert( - const std::string &kernel_name, - const std::vector &args, - std::unique_ptr mod, - std::vector &&tasks) const { - tlctx_->add_module(std::move(mod)); - - using TaskFunc = int32 (*)(void *); - std::vector task_funcs; - task_funcs.reserve(tasks.size()); - for (auto &task : tasks) { - auto *func_ptr = tlctx_->lookup_function_pointer(task.name); - TI_ASSERT_INFO(func_ptr, "Offloaded task function {} not found", task.name); - task_funcs.push_back((TaskFunc)(func_ptr)); - } - // Do NOT capture `this`... - return [executor = this->executor_, args, kernel_name, - task_funcs](RuntimeContext &context) { - TI_TRACE("Launching kernel {}", kernel_name); - // For taichi ndarrays, context.args saves pointer to its - // |DeviceAllocation|, CPU backend actually want to use the raw ptr here. 
- for (int i = 0; i < (int)args.size(); i++) { - if (args[i].is_array && - context.device_allocation_type[i] != - RuntimeContext::DevAllocType::kNone && - context.array_runtime_sizes[i] > 0) { - DeviceAllocation *ptr = - static_cast(context.get_arg(i)); - uint64 host_ptr = (uint64)executor->get_ndarray_alloc_info_ptr(*ptr); - context.set_arg(i, host_ptr); - context.set_array_device_allocation_type( - i, RuntimeContext::DevAllocType::kNone); - } - } - for (auto task : task_funcs) { - task(&context); - } - }; -} - -FunctionType ModuleToFunctionConverter::convert( - const Kernel *kernel, - std::unique_ptr mod, - std::vector &&tasks) const { - return convert(kernel->name, infer_launch_args(kernel), std::move(mod), - std::move(tasks)); -} - TLANG_NAMESPACE_END #endif // #ifdef TI_WITH_LLVM diff --git a/taichi/codegen/llvm/codegen_llvm.h b/taichi/codegen/llvm/codegen_llvm.h index 15e98019da4cf..d2d1928077398 100644 --- a/taichi/codegen/llvm/codegen_llvm.h +++ b/taichi/codegen/llvm/codegen_llvm.h @@ -45,6 +45,13 @@ class FunctionCreationGuard { struct LLVMCompiledData { std::vector tasks; std::unique_ptr module{nullptr}; + LLVMCompiledData() = default; + LLVMCompiledData(LLVMCompiledData &&) = default; + LLVMCompiledData(std::vector tasks, + std::unique_ptr module) + : tasks(std::move(tasks)), module(std::move(module)) { + } + TI_IO_DEF(tasks); }; class CodeGenLLVM : public IRVisitor, public LLVMModuleBuilder { @@ -134,7 +141,7 @@ class CodeGenLLVM : public IRVisitor, public LLVMModuleBuilder { LLVMCompiledData run_compilation(); // TODO: This function relies largely on `run_compilation()`. Name it better. - virtual FunctionType gen(); + virtual FunctionType gen() { TI_NOT_IMPLEMENTED }; virtual bool supports_offline_cache() const { return false; @@ -410,30 +417,6 @@ class CodeGenLLVM : public IRVisitor, public LLVMModuleBuilder { void cache_module(const std::string &kernel_key); }; -class LlvmRuntimeExecutor; - -// TODO: Make ModuleToFunctionConverter abstract, -// Move CPU implementation to "taichi/backend/cpu/" -class ModuleToFunctionConverter { - public: - explicit ModuleToFunctionConverter(TaichiLLVMContext *tlctx, - LlvmRuntimeExecutor *executor); - - virtual ~ModuleToFunctionConverter() = default; - - virtual FunctionType convert(const std::string &kernel_name, - const std::vector &args, - std::unique_ptr mod, - std::vector &&tasks) const; - - virtual FunctionType convert(const Kernel *kernel, - std::unique_ptr mod, - std::vector &&tasks) const; - - protected: - TaichiLLVMContext *tlctx_{nullptr}; - LlvmRuntimeExecutor *executor_{nullptr}; -}; } // namespace lang } // namespace taichi diff --git a/taichi/runtime/cpu/aot_module_loader_impl.cpp b/taichi/runtime/cpu/aot_module_loader_impl.cpp index 03fec02000694..0471c72f1a3f5 100644 --- a/taichi/runtime/cpu/aot_module_loader_impl.cpp +++ b/taichi/runtime/cpu/aot_module_loader_impl.cpp @@ -3,7 +3,7 @@ #include "taichi/runtime/llvm/llvm_offline_cache.h" #include "taichi/runtime/llvm/llvm_runtime_executor.h" -#include "taichi/codegen/llvm/codegen_llvm.h" +#include "taichi/codegen/cpu/codegen_cpu.h" namespace taichi { namespace lang { @@ -23,10 +23,10 @@ class AotModuleImpl : public LlvmAotModule { TI_ASSERT(arch == Arch::x64 || arch == Arch::arm64); auto *tlctx = executor_->get_llvm_context(arch); - ModuleToFunctionConverter converter{tlctx, executor_}; - - return converter.convert(name, loaded.args, std::move(loaded.owned_module), - std::move(loaded.offloaded_task_list)); + CPUModuleToFunctionConverter converter{tlctx, executor_}; + 
std::vector data; + data.emplace_back(std::move(loaded.offloaded_task_list), std::move(loaded.owned_module)); + return converter.convert(name, loaded.args, std::move(data)); } std::unique_ptr make_new_kernel_template( diff --git a/taichi/runtime/cuda/aot_module_loader_impl.cpp b/taichi/runtime/cuda/aot_module_loader_impl.cpp index bd9340312364b..72de1ad6d4d55 100644 --- a/taichi/runtime/cuda/aot_module_loader_impl.cpp +++ b/taichi/runtime/cuda/aot_module_loader_impl.cpp @@ -25,8 +25,9 @@ class AotModuleImpl : public LlvmAotModule { CUDAModuleToFunctionConverter converter{tlctx, executor_}; - return converter.convert(name, loaded.args, std::move(loaded.owned_module), - std::move(loaded.offloaded_task_list)); + std::vector data; + data.emplace_back(std::move(loaded.offloaded_task_list), std::move(loaded.owned_module)); + return converter.convert(name, loaded.args, std::move(data)); } std::unique_ptr make_new_kernel_template( From 4269e0c17e10c72323f0c92b5d2dcefe09ee3c6f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 8 Jul 2022 03:35:05 +0000 Subject: [PATCH 2/4] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- taichi/codegen/cpu/codegen_cpu.cpp | 2 -- taichi/codegen/cpu/codegen_cpu.h | 2 +- taichi/codegen/llvm/codegen_llvm.cpp | 1 - taichi/codegen/llvm/codegen_llvm.h | 3 +-- taichi/runtime/cpu/aot_module_loader_impl.cpp | 3 ++- taichi/runtime/cuda/aot_module_loader_impl.cpp | 3 ++- 6 files changed, 6 insertions(+), 8 deletions(-) diff --git a/taichi/codegen/cpu/codegen_cpu.cpp b/taichi/codegen/cpu/codegen_cpu.cpp index 085de99ed59f9..ba48784c07aa4 100644 --- a/taichi/codegen/cpu/codegen_cpu.cpp +++ b/taichi/codegen/cpu/codegen_cpu.cpp @@ -219,8 +219,6 @@ class CodeGenLLVMCPU : public CodeGenLLVM { data.push_back(std::move(compiled_res)); return converter.convert(kernel, std::move(data)); } - - }; } // namespace diff --git a/taichi/codegen/cpu/codegen_cpu.h b/taichi/codegen/cpu/codegen_cpu.h index ab3e81b0ea0e4..7aa0369a29d23 100644 --- a/taichi/codegen/cpu/codegen_cpu.h +++ b/taichi/codegen/cpu/codegen_cpu.h @@ -10,7 +10,7 @@ TLANG_NAMESPACE_BEGIN class CodeGenCPU : public KernelCodeGen { -public: + public: CodeGenCPU(Kernel *kernel, IRNode *ir = nullptr) : KernelCodeGen(kernel, ir) { } diff --git a/taichi/codegen/llvm/codegen_llvm.cpp b/taichi/codegen/llvm/codegen_llvm.cpp index 9955853f41ff1..4c5a6f186d0c1 100644 --- a/taichi/codegen/llvm/codegen_llvm.cpp +++ b/taichi/codegen/llvm/codegen_llvm.cpp @@ -2374,7 +2374,6 @@ bool CodeGenLLVM::maybe_read_compilation_from_cache( return true; } - llvm::Value *CodeGenLLVM::create_xlogue(std::unique_ptr &block) { llvm::Value *xlogue; diff --git a/taichi/codegen/llvm/codegen_llvm.h b/taichi/codegen/llvm/codegen_llvm.h index d2d1928077398..dcf7d552c653e 100644 --- a/taichi/codegen/llvm/codegen_llvm.h +++ b/taichi/codegen/llvm/codegen_llvm.h @@ -141,7 +141,7 @@ class CodeGenLLVM : public IRVisitor, public LLVMModuleBuilder { LLVMCompiledData run_compilation(); // TODO: This function relies largely on `run_compilation()`. Name it better. 
- virtual FunctionType gen() { TI_NOT_IMPLEMENTED }; + virtual FunctionType gen(){TI_NOT_IMPLEMENTED}; virtual bool supports_offline_cache() const { return false; @@ -417,7 +417,6 @@ class CodeGenLLVM : public IRVisitor, public LLVMModuleBuilder { void cache_module(const std::string &kernel_key); }; - } // namespace lang } // namespace taichi diff --git a/taichi/runtime/cpu/aot_module_loader_impl.cpp b/taichi/runtime/cpu/aot_module_loader_impl.cpp index 0471c72f1a3f5..092eff1040098 100644 --- a/taichi/runtime/cpu/aot_module_loader_impl.cpp +++ b/taichi/runtime/cpu/aot_module_loader_impl.cpp @@ -25,7 +25,8 @@ class AotModuleImpl : public LlvmAotModule { CPUModuleToFunctionConverter converter{tlctx, executor_}; std::vector data; - data.emplace_back(std::move(loaded.offloaded_task_list), std::move(loaded.owned_module)); + data.emplace_back(std::move(loaded.offloaded_task_list), + std::move(loaded.owned_module)); return converter.convert(name, loaded.args, std::move(data)); } diff --git a/taichi/runtime/cuda/aot_module_loader_impl.cpp b/taichi/runtime/cuda/aot_module_loader_impl.cpp index 72de1ad6d4d55..65690522d4741 100644 --- a/taichi/runtime/cuda/aot_module_loader_impl.cpp +++ b/taichi/runtime/cuda/aot_module_loader_impl.cpp @@ -26,7 +26,8 @@ class AotModuleImpl : public LlvmAotModule { CUDAModuleToFunctionConverter converter{tlctx, executor_}; std::vector data; - data.emplace_back(std::move(loaded.offloaded_task_list), std::move(loaded.owned_module)); + data.emplace_back(std::move(loaded.offloaded_task_list), + std::move(loaded.owned_module)); return converter.convert(name, loaded.args, std::move(data)); } From 101497813c0aadfdbd2ae9d3380cb1b437bcc8c3 Mon Sep 17 00:00:00 2001 From: Lin Jiang <90667349+lin-hitonami@users.noreply.github.com> Date: Fri, 8 Jul 2022 14:04:37 +0800 Subject: [PATCH 3/4] Update codegen_cuda.cpp --- taichi/codegen/cuda/codegen_cuda.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/taichi/codegen/cuda/codegen_cuda.cpp b/taichi/codegen/cuda/codegen_cuda.cpp index 3ef4c2b20d046..cd3299a51ac79 100644 --- a/taichi/codegen/cuda/codegen_cuda.cpp +++ b/taichi/codegen/cuda/codegen_cuda.cpp @@ -738,8 +738,6 @@ FunctionType CodeGenCUDA::codegen() { return CodeGenLLVMCUDA(kernel, ir).gen(); } -#ifdef TI_WITH_LLVM - FunctionType CUDAModuleToFunctionConverter::convert( const std::string &kernel_name, const std::vector &args, @@ -850,6 +848,4 @@ FunctionType CUDAModuleToFunctionConverter::convert( #endif // TI_WITH_CUDA } -#endif - TLANG_NAMESPACE_END From 9d42a1e614c3f0f78653b32af7a4248e521cdf6c Mon Sep 17 00:00:00 2001 From: Lin Jiang <90667349+lin-hitonami@users.noreply.github.com> Date: Fri, 8 Jul 2022 15:05:44 +0800 Subject: [PATCH 4/4] Update codegen_cuda.h --- taichi/codegen/cuda/codegen_cuda.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/taichi/codegen/cuda/codegen_cuda.h b/taichi/codegen/cuda/codegen_cuda.h index a252e7e6729d3..ab1a47f4baceb 100644 --- a/taichi/codegen/cuda/codegen_cuda.h +++ b/taichi/codegen/cuda/codegen_cuda.h @@ -22,8 +22,6 @@ class CodeGenCUDA : public KernelCodeGen { FunctionType codegen() override; }; -#ifdef TI_WITH_LLVM - class CUDAModuleToFunctionConverter : public ModuleToFunctionConverter { public: explicit CUDAModuleToFunctionConverter(TaichiLLVMContext *tlctx, @@ -37,6 +35,4 @@ class CUDAModuleToFunctionConverter : public ModuleToFunctionConverter { std::vector &&data) const override; }; -#endif - TLANG_NAMESPACE_END
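
For readers skimming the series, a minimal self-contained sketch of the interface shape this refactor lands on: convert() now takes a whole vector of compiled units (module plus its offloaded tasks) instead of a single module and task list, so call sites that still produce one unit -- CodeGenLLVMCPU::gen(), CodeGenLLVMCUDA::gen(), and both AOT module loaders -- wrap it in a one-element vector. All *Stub names below are invented stand-ins for the real Taichi types (LLVMCompiledData, ModuleToFunctionConverter, RuntimeContext, llvm::Module), not the actual declarations, and the launch-argument descriptor parameter of the real convert() is omitted:

#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct OffloadedTaskStub { std::string name; };      // stand-in for OffloadedTask
struct ModuleStub {};                                // stand-in for llvm::Module
struct RuntimeContextStub {};                        // stand-in for RuntimeContext
using FunctionTypeStub = std::function<void(RuntimeContextStub &)>;

// Mirrors LLVMCompiledData after this series: one module plus its tasks.
struct CompiledDataStub {
  std::vector<OffloadedTaskStub> tasks;
  std::unique_ptr<ModuleStub> module;
};

class ConverterStub {
 public:
  virtual ~ConverterStub() = default;
  // New shape: a vector of compiled units instead of (module, tasks).
  virtual FunctionTypeStub convert(const std::string &kernel_name,
                                   std::vector<CompiledDataStub> &&data) const = 0;
};

// Call sites that still produce a single unit wrap it in a one-element vector,
// the same pattern gen() and the AOT loaders adopt in this series.
FunctionTypeStub convert_single_unit(const ConverterStub &conv,
                                     const std::string &kernel_name,
                                     std::unique_ptr<ModuleStub> mod,
                                     std::vector<OffloadedTaskStub> tasks) {
  CompiledDataStub unit;
  unit.tasks = std::move(tasks);
  unit.module = std::move(mod);
  std::vector<CompiledDataStub> data;
  data.push_back(std::move(unit));
  return conv.convert(kernel_name, std::move(data));
}

Keeping each unit's tasks next to the module that defines them makes every element of the vector self-describing, which is what lets a single converter call accept several independently compiled modules.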
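
A companion sketch of the launch-closure pattern the CPU converter keeps after the change: every module in the vector is handed to the JIT, each offloaded task's entry point is looked up by name, and the returned callable runs the resolved entries in order against the runtime context. FakeJit is a placeholder for TaichiLLVMContext and its JIT session, and the ndarray device-allocation remapping the real converter performs before launch is left out:

#include <cstdio>
#include <functional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

struct Ctx { int value = 0; };                 // stand-in for RuntimeContext
using TaskFunc = void (*)(Ctx *);
using Launcher = std::function<void(Ctx &)>;

struct FakeJit {                               // stand-in for the JIT / tlctx
  std::unordered_map<std::string, TaskFunc> symbols;
  TaskFunc lookup(const std::string &name) const {
    auto it = symbols.find(name);
    return it == symbols.end() ? nullptr : it->second;
  }
};

struct CompiledUnit {                          // stand-in for LLVMCompiledData
  std::vector<std::string> task_names;         // tasks, by entry-point name
  // (module omitted; FakeJit already "knows" the symbols)
};

// The same flattening the CPU converter performs over the data vector.
Launcher make_launcher(const FakeJit &jit, std::vector<CompiledUnit> &&data) {
  std::vector<TaskFunc> task_funcs;
  for (const auto &unit : data)
    for (const auto &name : unit.task_names)
      if (TaskFunc f = jit.lookup(name))       // the real code asserts on failure
        task_funcs.push_back(f);
  // The closure captures only the resolved entries, not the converter itself.
  return [task_funcs](Ctx &ctx) {
    for (TaskFunc f : task_funcs)
      f(&ctx);
  };
}

static void task_a(Ctx *c) { c->value += 1; }
static void task_b(Ctx *c) { c->value *= 10; }

int main() {
  FakeJit jit;
  jit.symbols = {{"task_a", task_a}, {"task_b", task_b}};
  std::vector<CompiledUnit> data = {{{"task_a"}}, {{"task_b"}}};
  Launcher launch = make_launcher(jit, std::move(data));
  Ctx ctx;
  launch(ctx);
  std::printf("%d\n", ctx.value);  // prints 10: both units' tasks ran in order
}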