From 4d7240040e668ebdd72c3a9fb59f11406b2b2621 Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Mon, 22 Aug 2022 16:55:37 +0800
Subject: [PATCH 1/5] [bug] Improve error message with GlobalPtrStmt indexing

---
 python/taichi/lang/any_array.py               |  6 +++--
 python/taichi/lang/impl.py                    | 19 +++++++++++----
 python/taichi/lang/matrix.py                  | 14 +++++++----
 python/taichi/lang/mesh.py                    |  6 +++--
 python/taichi/lang/simt/block.py              |  3 ++-
 taichi/ir/frontend_ir.cpp                     | 23 ++++++++++++++-----
 taichi/ir/frontend_ir.h                       | 14 +++++++----
 taichi/math/svd.h                             |  3 ++-
 taichi/program/program.cpp                    |  6 +++--
 taichi/python/export_lang.cpp                 | 13 +++++++----
 taichi/transforms/auto_diff.cpp               |  3 +++
 taichi/transforms/check_out_of_bound.cpp      |  5 ++++
 taichi/transforms/lower_ast.cpp               |  2 ++
 taichi/transforms/simplify.cpp                |  9 +++++---
 taichi/transforms/type_check.cpp              |  3 +++
 taichi/transforms/utils.cpp                   |  5 ++++
 taichi/transforms/utils.h                     |  3 +++
 tests/cpp/ir/frontend_type_inference_test.cpp |  2 +-
 18 files changed, 103 insertions(+), 36 deletions(-)

diff --git a/python/taichi/lang/any_array.py b/python/taichi/lang/any_array.py
index b27fe9890b303..7f6c748b56340 100644
--- a/python/taichi/lang/any_array.py
+++ b/python/taichi/lang/any_array.py
@@ -1,4 +1,5 @@
 from taichi._lib import core as _ti_core
+from taichi.lang import impl
 from taichi.lang.enums import Layout
 from taichi.lang.expr import Expr, make_expr_group
 from taichi.lang.util import taichi_scope
@@ -67,8 +68,9 @@ def subscript(self, i, j):
             indices = indices_second + self.indices_first
         else:
             indices = self.indices_first + indices_second
-        return Expr(_ti_core.subscript(self.arr.ptr,
-                                       make_expr_group(*indices)))
+        return Expr(
+            _ti_core.subscript(self.arr.ptr, make_expr_group(*indices),
+                               impl.get_runtime().get_current_src_info()))


 __all__ = []
diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py
index f16efdeb1f4c3..4016c3ffc9edb 100644
--- a/python/taichi/lang/impl.py
+++ b/python/taichi/lang/impl.py
@@ -32,7 +32,8 @@
 @taichi_scope
 def expr_init_local_tensor(shape, element_type, elements):
     return get_runtime().prog.current_ast_builder().expr_alloca_local_tensor(
-        shape, element_type, elements)
+        shape, element_type, elements,
+        get_runtime().get_current_src_info())


 @taichi_scope
@@ -72,7 +73,8 @@ def expr_init(rhs):
     if hasattr(rhs, '_data_oriented'):
         return rhs
     return Expr(get_runtime().prog.current_ast_builder().expr_var(
-        Expr(rhs).ptr))
+        Expr(rhs).ptr,
+        get_runtime().get_current_src_info()))


 @taichi_scope
@@ -158,11 +160,13 @@ def subscript(value, *_indices, skip_reordered=False, get_ref=False):
                 Expr(_indices[0]).ptr, ConvType.g2r))
         ])
+
         return subscript(value, *reordered_index, skip_reordered=True)
     if isinstance(value, SparseMatrixProxy):
         return value.subscript(*_indices)
     if isinstance(value, Field):
         _var = value._get_field_members()[0].ptr
+
         if _var.snode() is None:
             if _var.is_primal():
                 raise RuntimeError(
@@ -182,7 +186,9 @@ def subscript(value, *_indices, skip_reordered=False, get_ref=False):
             entries = {k: subscript(v, *_indices) for k, v in value._items}
             entries['__struct_methods'] = value.struct_methods
             return _IntermediateStruct(entries)
-        return Expr(_ti_core.subscript(_var, indices_expr_group))
+        return Expr(
+            _ti_core.subscript(_var, indices_expr_group,
+                               get_runtime().get_current_src_info()))
     if isinstance(value, AnyArray):
         # TODO: deprecate using get_attribute to get dim
         field_dim = int(value.ptr.get_attribute("dim"))
@@ -192,7 +198,9 @@ def subscript(value, *_indices, skip_reordered=False, get_ref=False):
                 f'Field with dim {field_dim - element_dim} accessed with indices of dim {index_dim}'
             )
         if element_dim == 0:
-            return Expr(_ti_core.subscript(value.ptr, indices_expr_group))
+            return Expr(
+                _ti_core.subscript(value.ptr, indices_expr_group,
+                                   get_runtime().get_current_src_info()))
         n = value.element_shape[0]
         m = 1 if element_dim == 1 else value.element_shape[1]
         any_array_access = AnyArrayAccess(value, _indices)
@@ -217,7 +225,8 @@ def make_stride_expr(_var, _indices, shape, stride):

 @taichi_scope
 def make_index_expr(_var, _indices):
-    return Expr(_ti_core.make_index_expr(_var, make_expr_group(*_indices)))
+    return Expr(_ti_core.make_index_expr(_var, make_expr_group(*_indices)),
+                get_runtime().get_current_src_info())


 class SrcInfoGuard:
diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py
index e72831224738b..b8f5f8c81e3cb 100644
--- a/python/taichi/lang/matrix.py
+++ b/python/taichi/lang/matrix.py
@@ -248,8 +248,12 @@ def _subscript(self, is_global_mat, *indices, get_ref=False):
             return self.any_array_access.subscript(i, j)
         if self.local_tensor_proxy is not None:
             if len(indices) == 1:
-                return impl.make_index_expr(self.local_tensor_proxy, (i, ))
-            return impl.make_index_expr(self.local_tensor_proxy, (i, j))
+                return impl.make_index_expr(
+                    self.local_tensor_proxy, (i, ),
+                    impl.get_runtime().get_current_src_info())
+            return impl.make_index_expr(
+                self.local_tensor_proxy, (i, j),
+                impl.get_runtime().get_current_src_info())
         if impl.current_cfg(
         ).dynamic_index and is_global_mat and self.dynamic_index_stride:
             return impl.make_stride_expr(self.entries[0].ptr, (i, j),
@@ -322,7 +326,8 @@ def with_dynamic_index(self, arr, dt):
                     list([
                         impl.make_index_expr(
                             local_tensor_proxy,
-                            (expr.Expr(i, dtype=primitive_types.i32), ))
+                            (expr.Expr(i, dtype=primitive_types.i32), ),
+                            impl.get_runtime().get_current_src_info())
                     ]))
         return local_tensor_proxy, mat
@@ -352,7 +357,8 @@ def with_dynamic_index(self, arr, dt):
                     impl.make_index_expr(
                         local_tensor_proxy,
                         (expr.Expr(i, dtype=primitive_types.i32),
-                         expr.Expr(j, dtype=primitive_types.i32))))
+                         expr.Expr(j, dtype=primitive_types.i32)),
+                        impl.get_runtime().get_current_src_info()))
         return local_tensor_proxy, mat

     def _get_entry_to_infer(self, arr):
diff --git a/python/taichi/lang/mesh.py b/python/taichi/lang/mesh.py
index e8592f7b69f8b..2005d217ccff3 100644
--- a/python/taichi/lang/mesh.py
+++ b/python/taichi/lang/mesh.py
@@ -660,8 +660,10 @@ def __init__(self, mesh: MeshInstance, element_type: MeshElementType,
             var = attr._get_field_members()[0].ptr
             setattr(
                 self, key,
-                impl.Expr(_ti_core.subscript(var,
-                                             global_entry_expr_group)))
+                impl.Expr(
+                    _ti_core.subscript(
+                        var, global_entry_expr_group,
+                        impl.get_runtime().get_current_src_info())))

         for element_type in self.mesh._type.elements:
             setattr(self, element_type_name(element_type),
diff --git a/python/taichi/lang/simt/block.py b/python/taichi/lang/simt/block.py
index 290ae3667f9a7..7f47a04603759 100644
--- a/python/taichi/lang/simt/block.py
+++ b/python/taichi/lang/simt/block.py
@@ -53,4 +53,5 @@ def __init__(self, shape, dtype):

     @taichi_scope
     def _subscript(self, *indices, get_ref=False):
-        return impl.make_index_expr(self.shared_array_proxy, (indices, ))
+        return impl.make_index_expr(self.shared_array_proxy, (indices, ),
+                                    impl.get_runtime().get_current_src_info())
diff --git a/taichi/ir/frontend_ir.cpp b/taichi/ir/frontend_ir.cpp
index 56d5de025a9e8..a553e3ca0da4e 100644
--- a/taichi/ir/frontend_ir.cpp
+++ b/taichi/ir/frontend_ir.cpp
@@ -358,10 +358,12 @@ void InternalFuncCallExpression::flatten(FlattenContext *ctx) {
   ctx->push_back(func_name, args_stmts, nullptr, with_runtime_context);
   stmt = ctx->back_stmt();
+  stmt->tb = tb;
 }

 void ExternalTensorExpression::flatten(FlattenContext *ctx) {
   auto ptr = Stmt::make(arg_id, dt, /*is_ptr=*/true);
+  ptr->tb = tb;
   ctx->push_back(std::move(ptr));
   stmt = ctx->back_stmt();
 }
@@ -370,6 +372,7 @@ void GlobalVariableExpression::flatten(FlattenContext *ctx) {
   TI_ASSERT(snode->num_active_indices == 0);
   auto ptr = Stmt::make(LaneAttribute(snode), std::vector());
+  ptr->tb = tb;
   ctx->push_back(std::move(ptr));
 }
@@ -483,6 +486,7 @@ void IndexExpression::flatten(FlattenContext *ctx) {
     stmt = make_tensor_access(
         ctx, var, indices, var->ret_type->cast()->get_shape(), 1);
   }
+  stmt->tb = tb;
 }

 void StrideExpression::type_check(CompileConfig *) {
@@ -603,6 +607,7 @@ void SNodeOpExpression::flatten(FlattenContext *ctx) {
     indices_stmt.push_back(indices[i]->stmt);
   }
   auto ptr = ctx->push_back(snode, indices_stmt);
+  ptr->tb = tb;
   if (op_type == SNodeOpType::is_active) {
     TI_ERROR_IF(snode->type != SNodeType::pointer &&
                     snode->type != SNodeType::hash &&
@@ -835,22 +840,27 @@ void ASTBuilder::stop_gradient(SNode *snode) {
   stack_.back()->stop_gradients.push_back(snode);
 }

-void ASTBuilder::insert_assignment(Expr &lhs, const Expr &rhs) {
+void ASTBuilder::insert_assignment(Expr &lhs,
+                                   const Expr &rhs,
+                                   const std::string &tb) {
   // Inside a kernel or a function
   // Create an assignment in the IR
   if (lhs.expr == nullptr) {
     lhs.set(rhs);
   } else if (lhs.expr->is_lvalue()) {
-    this->insert(std::make_unique(lhs, rhs));
+    auto stmt = std::make_unique(lhs, rhs);
+    stmt->tb = tb;
+    this->insert(std::move(stmt));
+
   } else {
     TI_ERROR("Cannot assign to non-lvalue: {}",
             ExpressionHumanFriendlyPrinter::expr_to_string(lhs));
   }
 }

-Expr ASTBuilder::make_var(const Expr &x) {
+Expr ASTBuilder::make_var(const Expr &x, std::string tb) {
   auto var = this->expr_alloca();
-  this->insert_assignment(var, x);
+  this->insert_assignment(var, x, tb);
   return var;
 }
@@ -962,7 +972,8 @@ Expr ASTBuilder::expr_alloca() {

 Expr ASTBuilder::expr_alloca_local_tensor(const std::vector &shape,
                                           const DataType &element_type,
-                                          const ExprGroup &elements) {
+                                          const ExprGroup &elements,
+                                          std::string tb) {
   auto var = Expr(std::make_shared(get_next_id()));
   this->insert(std::make_unique(
       std::static_pointer_cast(var.expr)->id, shape,
@@ -980,7 +991,7 @@ Expr ASTBuilder::expr_alloca_local_tensor(const std::vector &shape,
     for (int d = 0; d < (int)shape.size(); ++d)
       indices.push_back(reversed_indices[(int)shape.size() - 1 - d]);
     this->insert(std::make_unique(
-        Expr::make(var, indices), elements.exprs[i]));
+        Expr::make(var, indices, tb), elements.exprs[i]));
   }
   return var;
 }
diff --git a/taichi/ir/frontend_ir.h b/taichi/ir/frontend_ir.h
index b56c3ec48f416..1ce0254617afb 100644
--- a/taichi/ir/frontend_ir.h
+++ b/taichi/ir/frontend_ir.h
@@ -511,8 +511,11 @@ class IndexExpression : public Expression {
   Expr var;
   ExprGroup indices;

-  IndexExpression(const Expr &var, const ExprGroup &indices)
+  IndexExpression(const Expr &var,
+                  const ExprGroup &indices,
+                  std::string tb = "")
       : var(var), indices(indices) {
+    this->tb = tb;
   }

   void type_check(CompileConfig *config) override;
@@ -853,8 +856,10 @@ class ASTBuilder {
   Block *current_block();
   Stmt *get_last_stmt();
   void stop_gradient(SNode *);
-  void insert_assignment(Expr &lhs, const Expr &rhs);
-  Expr make_var(const Expr &x);
+  void insert_assignment(Expr &lhs,
+                         const Expr &rhs,
+                         const std::string &tb = "");
+  Expr make_var(const Expr &x, std::string tb);
   void insert_for(const Expr &s,
                   const Expr &e,
                   const std::function &func);
@@ -878,7 +883,8 @@ class ASTBuilder {
   Expr expr_alloca();
   Expr expr_alloca_local_tensor(const std::vector &shape,
                                 const DataType &element_type,
-                                const ExprGroup &elements);
+                                const ExprGroup &elements,
+                                std::string tb);
   Expr expr_alloca_shared_array(const std::vector &shape,
                                 const DataType &element_type);
   void expr_assign(const Expr &lhs, const Expr &rhs, std::string tb);
diff --git a/taichi/math/svd.h b/taichi/math/svd.h
index 36f349f4128d0..f57e081a1f0f8 100644
--- a/taichi/math/svd.h
+++ b/taichi/math/svd.h
@@ -58,8 +58,9 @@ sifakis_svd_export(ASTBuilder *ast_builder,
   constexpr Tf Sine_Pi_Over_Eight = 0.3826834323650897f;
   constexpr Tf Cosine_Pi_Over_Eight = 0.9238795325112867f;

+  std::string tb = "";
   auto Var =
-      std::bind(&ASTBuilder::make_var, ast_builder, std::placeholders::_1);
+      std::bind(&ASTBuilder::make_var, ast_builder, std::placeholders::_1, tb);

   auto Sfour_gamma_squared = Var(Expr(Tf(0.0)));
   auto Ssine_pi_over_eight = Var(Expr(Tf(0.0)));
diff --git a/taichi/program/program.cpp b/taichi/program/program.cpp
index e425218c0835f..d137ca5fe64a2 100644
--- a/taichi/program/program.cpp
+++ b/taichi/program/program.cpp
@@ -364,8 +364,10 @@ Kernel &Program::get_snode_writer(SNode *snode) {
     }
     auto expr = Expr(snode_to_glb_var_exprs_.at(snode))[indices];
     this->current_ast_builder()->insert_assignment(
-        expr, Expr::make(snode->num_active_indices,
-                         snode->dt->get_compute_type()));
+        expr,
+        Expr::make(snode->num_active_indices,
+                   snode->dt->get_compute_type()),
+        expr->tb);
   });
   ker.set_arch(get_accessor_arch());
   ker.name = kernel_name;
diff --git a/taichi/python/export_lang.cpp b/taichi/python/export_lang.cpp
index e2aaf88f8b4f4..cd98c859a21b3 100644
--- a/taichi/python/export_lang.cpp
+++ b/taichi/python/export_lang.cpp
@@ -958,12 +958,15 @@ void export_lang(py::module &m) {
       });

  m.def("data_type_name", data_type_name);

-  m.def("subscript", [](const Expr &expr, const ExprGroup &expr_group) {
-    return expr[expr_group];
-  });
+  m.def("subscript",
+        [](const Expr &expr, const ExprGroup &expr_group, std::string tb) {
+          Expr idx_expr = expr[expr_group];
+          idx_expr.set_tb(tb);
+          return idx_expr;
+        });

-  m.def("make_index_expr",
-        Expr::make);
+  m.def("make_index_expr", Expr::make);

  m.def("make_stride_expr",
        Expr::make
 #include
 #include
@@ -1601,6 +1602,8 @@ class GloablDataAccessRuleChecker : public BasicStmtVisitor {
         "(kernel={}) Breaks the global data access rule. Snode {} is "
         "overwritten unexpectedly.",
         kernel_name_, dest->snodes[0]->get_node_type_name());
+    msg = message_append_backtrace_info(msg, stmt);
+
     stmt->insert_before_me(
         Stmt::make(check_equal, msg, std::vector()));
   }
diff --git a/taichi/transforms/check_out_of_bound.cpp b/taichi/transforms/check_out_of_bound.cpp
index 62734dc93c2b0..1877feec4df2e 100644
--- a/taichi/transforms/check_out_of_bound.cpp
+++ b/taichi/transforms/check_out_of_bound.cpp
@@ -3,6 +3,7 @@
 #include "taichi/ir/transforms.h"
 #include "taichi/ir/visitors.h"
 #include "taichi/transforms/check_out_of_bound.h"
+#include "taichi/transforms/utils.h"
 #include

 TLANG_NAMESPACE_BEGIN
@@ -53,6 +54,7 @@ class CheckOutOfBound : public BasicStmtVisitor {
     std::string msg =
         fmt::format("(kernel={}) Accessing field ({}) of size (", kernel_name,
                     snode->get_node_type_name_hinted());
+
     std::string offset_msg = "offset (";
     std::vector args;
     for (int i = 0; i < stmt->indices.size(); i++) {
@@ -98,6 +100,7 @@ class CheckOutOfBound : public BasicStmtVisitor {
       msg += "%d";
     }
     msg += ")";
+    msg = message_append_backtrace_info(msg, stmt);

     new_stmts.push_back(result, msg, args);
     modifier.insert_before(stmt, std::move(new_stmts));
@@ -117,6 +120,8 @@ class CheckOutOfBound : public BasicStmtVisitor {
         BinaryOpType::cmp_ge, stmt->rhs, compare_rhs.get());
     compare->ret_type = PrimitiveType::i32;
     std::string msg = "Negative exponent for integer pows are not allowed";
+    msg = message_append_backtrace_info(msg, stmt);
+
     auto assert_stmt =
         std::make_unique(compare.get(), msg, std::vector());
     assert_stmt->accept(this);
diff --git a/taichi/transforms/lower_ast.cpp b/taichi/transforms/lower_ast.cpp
index ed837aa0806ff..0f88474fd318a 100644
--- a/taichi/transforms/lower_ast.cpp
+++ b/taichi/transforms/lower_ast.cpp
@@ -391,7 +391,9 @@ class LowerAST : public IRVisitor {
                                          expr->stmt);
     } else if (dest.is()) {
       auto ix = dest.cast();
+
       flatten_lvalue(dest, &fctx);
+
       if (ix->is_local()) {
         fctx.push_back(dest->stmt, expr->stmt);
       } else {
diff --git a/taichi/transforms/simplify.cpp b/taichi/transforms/simplify.cpp
index af09d42a21df1..f0d557e241c0b 100644
--- a/taichi/transforms/simplify.cpp
+++ b/taichi/transforms/simplify.cpp
@@ -6,6 +6,7 @@
 #include "taichi/transforms/simplify.h"
 #include "taichi/program/kernel.h"
 #include "taichi/program/program.h"
+#include "taichi/transforms/utils.h"
 #include
 #include
 #include
@@ -301,9 +302,11 @@ class BasicBlockSimplify : public IRVisitor {
         auto zero = Stmt::make(LaneAttribute(0));
         auto check_sum =
             Stmt::make(BinaryOpType::cmp_ge, sum.get(), zero.get());
-        auto assert = Stmt::make(check_sum.get(),
-                                 "The indices provided are too big!",
-                                 std::vector());
+        auto assert =
+            Stmt::make(check_sum.get(),
+                       message_append_backtrace_info(
+                           "The indices provided are too big!", stmt),
+                       std::vector());
         // Because Taichi's assertion is checked only after the execution of the
         // kernel, when the linear index overflows and goes negative, we have to
         // replace that with 0 to make sure that the rest of the kernel can still
diff --git a/taichi/transforms/type_check.cpp b/taichi/transforms/type_check.cpp
index e16c62501ba3c..15a912a3736f6 100644
--- a/taichi/transforms/type_check.cpp
+++ b/taichi/transforms/type_check.cpp
@@ -5,6 +5,7 @@
 #include "taichi/ir/transforms.h"
 #include "taichi/ir/analysis.h"
 #include "taichi/ir/frontend_ir.h"
+#include "taichi/transforms/utils.h"

 TLANG_NAMESPACE_BEGIN
@@ -252,6 +253,8 @@ class TypeCheck : public IRVisitor {
     std::string msg =
         "Detected overflow for bit_shift_op with rhs = %d, exceeding limit of "
         "%d.";
+    msg = message_append_backtrace_info(msg, stmt);
+
     std::vector args = {rhs, const_stmt.get()};
     auto assert_stmt =
         Stmt::make(cond_stmt.get(), msg, std::move(args));
diff --git a/taichi/transforms/utils.cpp b/taichi/transforms/utils.cpp
index d3dd28bccf3c2..1ce201d4d3511 100644
--- a/taichi/transforms/utils.cpp
+++ b/taichi/transforms/utils.cpp
@@ -10,5 +10,10 @@ Stmt *generate_mod_x_div_y(VecStatement *stmts, Stmt *num, int x, int y) {
   return stmts->push_back(BinaryOpType::div, mod_x, const_y);
 }

+std::string message_append_backtrace_info(const std::string &message,
+                                          Stmt *stmt) {
+  return message + "\n" + stmt->tb;
+}
+
 }  // namespace lang
 }  // namespace taichi
diff --git a/taichi/transforms/utils.h b/taichi/transforms/utils.h
index 3440be0314b99..195af43aaacd4 100644
--- a/taichi/transforms/utils.h
+++ b/taichi/transforms/utils.h
@@ -5,5 +5,8 @@ namespace lang {

 Stmt *generate_mod_x_div_y(VecStatement *stmts, Stmt *num, int x, int y);

+std::string message_append_backtrace_info(const std::string &message,
+                                          Stmt *stmt);
+
 }  // namespace lang
 }  // namespace taichi
diff --git a/tests/cpp/ir/frontend_type_inference_test.cpp b/tests/cpp/ir/frontend_type_inference_test.cpp
index 53b34381c07fd..2d1b918f1a647 100644
--- a/tests/cpp/ir/frontend_type_inference_test.cpp
+++ b/tests/cpp/ir/frontend_type_inference_test.cpp
@@ -32,7 +32,7 @@ TEST(FrontendTypeInference, Id) {
   Callable::CurrentCallableGuard _(kernel->program, kernel.get());
   auto const_i32 = value(-(1 << 20));
   const_i32->type_check(nullptr);
-  auto id_i32 = prog->current_ast_builder()->make_var(const_i32);
+  auto id_i32 = prog->current_ast_builder()->make_var(const_i32, const_i32->tb);
   EXPECT_EQ(id_i32->ret_type, PrimitiveType::i32);
 }

From 37580e7c56dc6c3be615c7fac0e9d90add865a1f Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Tue, 23 Aug 2022 10:59:38 +0800
Subject: [PATCH 2/5] Fix minor bug

---
 python/taichi/lang/matrix.py             | 9 +++++++--
 taichi/transforms/auto_diff.cpp          | 2 +-
 taichi/transforms/check_out_of_bound.cpp | 4 ++--
 taichi/transforms/simplify.cpp           | 8 +++-----
 taichi/transforms/type_check.cpp         | 2 +-
 taichi/transforms/utils.cpp              | 5 -----
 taichi/transforms/utils.h                | 3 ---
 7 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py
index b8f5f8c81e3cb..ba37f19b2735f 100644
--- a/python/taichi/lang/matrix.py
+++ b/python/taichi/lang/matrix.py
@@ -245,7 +245,9 @@ def _subscript(self, is_global_mat, *indices, get_ref=False):
                 ndim=1)
         if self.any_array_access:
-            return self.any_array_access.subscript(i, j)
+            return self.any_array_access.subscript(
+                i, j,
+                impl.get_runtime().get_current_src_info())
         if self.local_tensor_proxy is not None:
             if len(indices) == 1:
                 return impl.make_index_expr(
                     self.local_tensor_proxy, (i, ),
@@ -1502,7 +1504,10 @@ def __init__(self, field, indices):
         super().__init__(
             field.n,
             field.m, [
-                expr.Expr(ti_python_core.subscript(e.ptr, indices))
+                expr.Expr(
+                    ti_python_core.subscript(
+                        e.ptr, indices,
+                        impl.get_runtime().get_current_src_info()))
                 for e in field._get_field_members()
             ],
             ndim=getattr(field, "ndim", 2))
diff --git a/taichi/transforms/auto_diff.cpp b/taichi/transforms/auto_diff.cpp
index bce7cc2ec9b98..27d41849d8b0b 100644
--- a/taichi/transforms/auto_diff.cpp
+++ b/taichi/transforms/auto_diff.cpp
@@ -1602,7 +1602,7 @@ class GloablDataAccessRuleChecker : public BasicStmtVisitor {
         "(kernel={}) Breaks the global data access rule. Snode {} is "
         "overwritten unexpectedly.",
         kernel_name_, dest->snodes[0]->get_node_type_name());
-    msg = message_append_backtrace_info(msg, stmt);
+    msg += "\n" + stmt->tb;

     stmt->insert_before_me(
         Stmt::make(check_equal, msg, std::vector()));
diff --git a/taichi/transforms/check_out_of_bound.cpp b/taichi/transforms/check_out_of_bound.cpp
index 1877feec4df2e..6c89fa60d3904 100644
--- a/taichi/transforms/check_out_of_bound.cpp
+++ b/taichi/transforms/check_out_of_bound.cpp
@@ -100,7 +100,7 @@ class CheckOutOfBound : public BasicStmtVisitor {
       msg += "%d";
     }
     msg += ")";
-    msg = message_append_backtrace_info(msg, stmt);
+    msg += "\n" + stmt->tb;

     new_stmts.push_back(result, msg, args);
     modifier.insert_before(stmt, std::move(new_stmts));
@@ -120,7 +120,7 @@ class CheckOutOfBound : public BasicStmtVisitor {
         BinaryOpType::cmp_ge, stmt->rhs, compare_rhs.get());
     compare->ret_type = PrimitiveType::i32;
     std::string msg = "Negative exponent for integer pows are not allowed";
-    msg = message_append_backtrace_info(msg, stmt);
+    msg += "\n" + stmt->tb;

     auto assert_stmt =
         std::make_unique(compare.get(), msg, std::vector());
diff --git a/taichi/transforms/simplify.cpp b/taichi/transforms/simplify.cpp
index f0d557e241c0b..54e81b247f7bb 100644
--- a/taichi/transforms/simplify.cpp
+++ b/taichi/transforms/simplify.cpp
@@ -302,11 +302,9 @@ class BasicBlockSimplify : public IRVisitor {
         auto zero = Stmt::make(LaneAttribute(0));
         auto check_sum =
             Stmt::make(BinaryOpType::cmp_ge, sum.get(), zero.get());
-        auto assert =
-            Stmt::make(check_sum.get(),
-                       message_append_backtrace_info(
-                           "The indices provided are too big!", stmt),
-                       std::vector());
+        auto assert = Stmt::make(
+            check_sum.get(), "The indices provided are too big!\n" + stmt->tb,
+            std::vector());
         // Because Taichi's assertion is checked only after the execution of the
         // kernel, when the linear index overflows and goes negative, we have to
         // replace that with 0 to make sure that the rest of the kernel can still
diff --git a/taichi/transforms/type_check.cpp b/taichi/transforms/type_check.cpp
index 15a912a3736f6..06253e1c6f602 100644
--- a/taichi/transforms/type_check.cpp
+++ b/taichi/transforms/type_check.cpp
@@ -253,7 +253,7 @@ class TypeCheck : public IRVisitor {
     std::string msg =
         "Detected overflow for bit_shift_op with rhs = %d, exceeding limit of "
         "%d.";
-    msg = message_append_backtrace_info(msg, stmt);
+    msg += "\n" + stmt->tb;

     std::vector args = {rhs, const_stmt.get()};
     auto assert_stmt =
         Stmt::make(cond_stmt.get(), msg, std::move(args));
diff --git a/taichi/transforms/utils.cpp b/taichi/transforms/utils.cpp
index 1ce201d4d3511..d3dd28bccf3c2 100644
--- a/taichi/transforms/utils.cpp
+++ b/taichi/transforms/utils.cpp
@@ -10,10 +10,5 @@ Stmt *generate_mod_x_div_y(VecStatement *stmts, Stmt *num, int x, int y) {
   return stmts->push_back(BinaryOpType::div, mod_x, const_y);
 }

-std::string message_append_backtrace_info(const std::string &message,
-                                          Stmt *stmt) {
-  return message + "\n" + stmt->tb;
-}
-
 }  // namespace lang
 }  // namespace taichi
diff --git a/taichi/transforms/utils.h b/taichi/transforms/utils.h
index 195af43aaacd4..3440be0314b99 100644
--- a/taichi/transforms/utils.h
+++ b/taichi/transforms/utils.h
@@ -5,8 +5,5 @@ namespace lang {

 Stmt *generate_mod_x_div_y(VecStatement *stmts, Stmt *num, int x, int y);

-std::string message_append_backtrace_info(const std::string &message,
-                                          Stmt *stmt);
-
 }  // namespace lang
 }  // namespace taichi

From 04fc404affa79be288e34c55e22cc5c8d59dd9e5 Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Tue, 23 Aug 2022 11:41:16 +0800
Subject: [PATCH 3/5] Bug fix

---
 python/taichi/lang/matrix.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py
index ba37f19b2735f..ab15dd48e8316 100644
--- a/python/taichi/lang/matrix.py
+++ b/python/taichi/lang/matrix.py
@@ -245,9 +245,7 @@ def _subscript(self, is_global_mat, *indices, get_ref=False):
                 ndim=1)
         if self.any_array_access:
-            return self.any_array_access.subscript(
-                i, j,
-                impl.get_runtime().get_current_src_info())
+            return self.any_array_access.subscript(i, j)
         if self.local_tensor_proxy is not None:
             if len(indices) == 1:
                 return impl.make_index_expr(

From e9564fc9ef3295a6b723d7215e98f0fdc0f7aa70 Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Tue, 23 Aug 2022 12:47:13 +0800
Subject: [PATCH 4/5] Fixed interface issues

---
 python/taichi/lang/impl.py       |  5 +++--
 python/taichi/lang/matrix.py     | 14 ++++----------
 python/taichi/lang/simt/block.py |  3 +--
 3 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py
index 4016c3ffc9edb..65fca0069b125 100644
--- a/python/taichi/lang/impl.py
+++ b/python/taichi/lang/impl.py
@@ -225,8 +225,9 @@ def make_stride_expr(_var, _indices, shape, stride):

 @taichi_scope
 def make_index_expr(_var, _indices):
-    return Expr(_ti_core.make_index_expr(_var, make_expr_group(*_indices)),
-                get_runtime().get_current_src_info())
+    return Expr(
+        _ti_core.make_index_expr(_var, make_expr_group(*_indices),
+                                 get_runtime().get_current_src_info()))

diff --git a/python/taichi/lang/matrix.py b/python/taichi/lang/matrix.py
index ab15dd48e8316..056ab0626db5d 100644
--- a/python/taichi/lang/matrix.py
+++ b/python/taichi/lang/matrix.py
@@ -248,12 +248,8 @@ def _subscript(self, is_global_mat, *indices, get_ref=False):
             return self.any_array_access.subscript(i, j)
         if self.local_tensor_proxy is not None:
             if len(indices) == 1:
-                return impl.make_index_expr(
-                    self.local_tensor_proxy, (i, ),
-                    impl.get_runtime().get_current_src_info())
-            return impl.make_index_expr(
-                self.local_tensor_proxy, (i, j),
-                impl.get_runtime().get_current_src_info())
+                return impl.make_index_expr(self.local_tensor_proxy, (i, ))
+            return impl.make_index_expr(self.local_tensor_proxy, (i, j))
         if impl.current_cfg(
         ).dynamic_index and is_global_mat and self.dynamic_index_stride:
             return impl.make_stride_expr(self.entries[0].ptr, (i, j),
@@ -326,8 +322,7 @@ def with_dynamic_index(self, arr, dt):
                     list([
                         impl.make_index_expr(
                             local_tensor_proxy,
-                            (expr.Expr(i, dtype=primitive_types.i32), ),
-                            impl.get_runtime().get_current_src_info())
+                            (expr.Expr(i, dtype=primitive_types.i32), ))
                     ]))
         return local_tensor_proxy, mat
@@ -357,8 +352,7 @@ def with_dynamic_index(self, arr, dt):
                     impl.make_index_expr(
                         local_tensor_proxy,
                         (expr.Expr(i, dtype=primitive_types.i32),
-                         expr.Expr(j, dtype=primitive_types.i32)),
-                        impl.get_runtime().get_current_src_info()))
+                         expr.Expr(j, dtype=primitive_types.i32))))
         return local_tensor_proxy, mat

     def _get_entry_to_infer(self, arr):
diff --git a/python/taichi/lang/simt/block.py b/python/taichi/lang/simt/block.py
index 7f47a04603759..290ae3667f9a7 100644
--- a/python/taichi/lang/simt/block.py
+++ b/python/taichi/lang/simt/block.py
@@ -53,5 +53,4 @@ def __init__(self, shape, dtype):

     @taichi_scope
     def _subscript(self, *indices, get_ref=False):
-        return impl.make_index_expr(self.shared_array_proxy, (indices, ),
-                                    impl.get_runtime().get_current_src_info())
+        return impl.make_index_expr(self.shared_array_proxy, (indices, ))

From 829122c14a663891b1ccc867487bc5be674d2f2b Mon Sep 17 00:00:00 2001
From: jim19930609
Date: Wed, 24 Aug 2022 14:35:58 +0800
Subject: [PATCH 5/5] Addressed review comments

---
 python/taichi/lang/impl.py               | 2 --
 taichi/transforms/check_out_of_bound.cpp | 2 --
 taichi/transforms/lower_ast.cpp          | 2 --
 taichi/transforms/type_check.cpp         | 1 -
 4 files changed, 7 deletions(-)

diff --git a/python/taichi/lang/impl.py b/python/taichi/lang/impl.py
index 65fca0069b125..b79221116614a 100644
--- a/python/taichi/lang/impl.py
+++ b/python/taichi/lang/impl.py
@@ -160,13 +160,11 @@ def subscript(value, *_indices, skip_reordered=False, get_ref=False):
                 Expr(_indices[0]).ptr, ConvType.g2r))
         ])
-
         return subscript(value, *reordered_index, skip_reordered=True)
     if isinstance(value, SparseMatrixProxy):
         return value.subscript(*_indices)
     if isinstance(value, Field):
         _var = value._get_field_members()[0].ptr
-
         if _var.snode() is None:
             if _var.is_primal():
                 raise RuntimeError(
diff --git a/taichi/transforms/check_out_of_bound.cpp b/taichi/transforms/check_out_of_bound.cpp
index 6c89fa60d3904..47e2105e954bb 100644
--- a/taichi/transforms/check_out_of_bound.cpp
+++ b/taichi/transforms/check_out_of_bound.cpp
@@ -54,7 +54,6 @@ class CheckOutOfBound : public BasicStmtVisitor {
     std::string msg =
         fmt::format("(kernel={}) Accessing field ({}) of size (", kernel_name,
                     snode->get_node_type_name_hinted());
-
     std::string offset_msg = "offset (";
     std::vector args;
     for (int i = 0; i < stmt->indices.size(); i++) {
@@ -121,7 +120,6 @@ class CheckOutOfBound : public BasicStmtVisitor {
         BinaryOpType::cmp_ge, stmt->rhs, compare_rhs.get());
     compare->ret_type = PrimitiveType::i32;
     std::string msg = "Negative exponent for integer pows are not allowed";
     msg += "\n" + stmt->tb;
-
     auto assert_stmt =
         std::make_unique(compare.get(), msg, std::vector());
     assert_stmt->accept(this);
diff --git a/taichi/transforms/lower_ast.cpp b/taichi/transforms/lower_ast.cpp
index 0f88474fd318a..ed837aa0806ff 100644
--- a/taichi/transforms/lower_ast.cpp
+++ b/taichi/transforms/lower_ast.cpp
@@ -391,9 +391,7 @@ class LowerAST : public IRVisitor {
                                          expr->stmt);
     } else if (dest.is()) {
       auto ix = dest.cast();
-
       flatten_lvalue(dest, &fctx);
-
       if (ix->is_local()) {
         fctx.push_back(dest->stmt, expr->stmt);
       } else {
diff --git a/taichi/transforms/type_check.cpp b/taichi/transforms/type_check.cpp
index 06253e1c6f602..2d691c573055b 100644
--- a/taichi/transforms/type_check.cpp
+++ b/taichi/transforms/type_check.cpp
@@ -254,7 +254,6 @@ class TypeCheck : public IRVisitor {
         "Detected overflow for bit_shift_op with rhs = %d, exceeding limit of "
         "%d.";
     msg += "\n" + stmt->tb;
-
     std::vector args = {rhs, const_stmt.get()};
     auto assert_stmt =
         Stmt::make(cond_stmt.get(), msg, std::move(args));