diff --git a/cmake/cinn/external/absl.cmake b/cmake/cinn/external/absl.cmake
index bec032ff3f763f..076b46b711ac06 100644
--- a/cmake/cinn/external/absl.cmake
+++ b/cmake/cinn/external/absl.cmake
@@ -38,37 +38,14 @@ ExternalProject_Add(
   -DCMAKE_INSTALL_PREFIX:PATH=${ABSL_INSTALL_DIR}
   -DCMAKE_INSTALL_LIBDIR:PATH=${ABSL_INSTALL_DIR}/lib
   -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-  -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_base.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_hash.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_string_view.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_low_level_hash.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_demangle_internal.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_raw_logging_internal.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_city.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_strings.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_throw_delegate.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_bad_any_cast_impl.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_bad_optional_access.a
-  BUILD_BYPRODUCTS ${ABSL_INSTALL_DIR}/lib/libabsl_bad_variant_access.a)
+  -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE})
 
 # It may be more convenient if we just include all absl libs
-set(ABSL_LIB_NAMES
-    hash
-    string_view
-    low_level_hash
-    demangle_internal
-    raw_logging_internal
-    city
-    strings
-    throw_delegate
-    bad_any_cast_impl
-    bad_optional_access
-    bad_variant_access)
+set(ABSL_LIB_NAMES "")
 set(ABSL_LIBS "")
 if(WITH_ROCM)
-  list(APPEND ABSL_LIB_NAMES strings_internal raw_logging_internal)
+  list(APPEND ABSL_LIB_NAMES strings_internal)
 endif()
 
 add_library(absl STATIC IMPORTED GLOBAL)
diff --git a/paddle/cinn/adt/op_equation_context.h b/paddle/cinn/adt/op_equation_context.h
index 494b731638d9c7..d3ebdf5b37823d 100644
--- a/paddle/cinn/adt/op_equation_context.h
+++ b/paddle/cinn/adt/op_equation_context.h
@@ -64,7 +64,7 @@ class OpEquationContext {
 
   template <typename T>
   const T& Attr(const std::string& name) const {
-    return absl::get<T>(GetAttribute(name));
+    return std::get<T>(GetAttribute(name));
   }
 
  protected:
diff --git a/paddle/cinn/backends/compiler.cc b/paddle/cinn/backends/compiler.cc
index 8d39efe823a01c..ebf6a6955bbe83 100644
--- a/paddle/cinn/backends/compiler.cc
+++ b/paddle/cinn/backends/compiler.cc
@@ -564,7 +564,7 @@ void Compiler::ExportObject(const std::string& path) {
   engine_->ExportObject(path);
 }
 
-void* Compiler::Lookup(absl::string_view fn_name) {
+void* Compiler::Lookup(std::string_view fn_name) {
   PADDLE_ENFORCE_NOT_NULL(
       engine_, ::common::errors::InvalidArgument("Sorry, engine_ is nullptr"));
   if (engine_->Lookup(fn_name) != nullptr) {
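A note on the absl::get → std::get changes that recur throughout this diff: on a variant the two have the same contract, including throwing on a type mismatch. A minimal standalone sketch (the Attribute alias and attr_store map below are simplified stand-ins for CINN's real types, not the project's actual definitions):

#include <iostream>
#include <map>
#include <string>
#include <variant>
#include <vector>

// Hypothetical stand-in for CINN's attribute variant.
using Attribute = std::variant<bool, int, float, std::string, std::vector<int>>;

int main() {
  std::map<std::string, Attribute> attr_store{{"axis", 1}};
  // Same semantics as absl::get<int>: throws std::bad_variant_access
  // if the stored alternative is not int.
  try {
    int axis = std::get<int>(attr_store.at("axis"));
    std::cout << "axis = " << axis << "\n";
  } catch (const std::bad_variant_access &e) {
    std::cerr << "type mismatch: " << e.what() << "\n";
  }
}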
diff --git a/paddle/cinn/backends/compiler.h b/paddle/cinn/backends/compiler.h
index 81c6cd6e62d83c..cfb5f8b0b37b83 100644
--- a/paddle/cinn/backends/compiler.h
+++ b/paddle/cinn/backends/compiler.h
@@ -14,7 +14,7 @@
 
 #pragma once
 
-#include <absl/strings/string_view.h>
+#include <string_view>
 #include
 #include
 
@@ -127,7 +127,7 @@ class Compiler final {
    * Retrieve a function by \p fn_name.
    * @return function address or null if not exists.
    */
-  void* Lookup(absl::string_view fn_name);
+  void* Lookup(std::string_view fn_name);
 
   std::vector<void*> GetFnPtr() const { return fn_ptr_; }
diff --git a/paddle/cinn/backends/extern_func_emitter.cc b/paddle/cinn/backends/extern_func_emitter.cc
index 2c19730dc9226b..b6fdb764be588f 100644
--- a/paddle/cinn/backends/extern_func_emitter.cc
+++ b/paddle/cinn/backends/extern_func_emitter.cc
@@ -14,9 +14,7 @@
 
 #include "paddle/cinn/backends/extern_func_emitter.h"
 
-#include <absl/hash/hash.h>
 #include
-
 #include
 #include
 #include
@@ -85,8 +83,8 @@ namespace std {
 
 size_t hash<cinn::backends::ExternFuncID>::operator()(
    const cinn::backends::ExternFuncID& x) const {
-  return absl::Hash<std::string>{}(x.name) ^
-         absl::Hash<std::string_view>{}(x.backend_id);
+  return std::hash<std::string>{}(x.name) ^
+         std::hash<std::string_view>{}(x.backend_id);
 }
 
 }  // namespace std
diff --git a/paddle/cinn/backends/function_prototype.h b/paddle/cinn/backends/function_prototype.h
index d68d7c4c9c4447..0859a441ab5970 100644
--- a/paddle/cinn/backends/function_prototype.h
+++ b/paddle/cinn/backends/function_prototype.h
@@ -14,7 +14,7 @@
 
 #pragma once
 
-#include <absl/strings/string_view.h>
+#include <string_view>
 #include
 #include
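The XOR hash combination kept in extern_func_emitter.cc above behaves the same under std::hash as under absl::Hash, but XOR is symmetric in its operands. A sketch under assumed field types (FuncID and its members below are illustrative, not CINN's real ExternFuncID); a boost-style combiner is shown as a common alternative if order sensitivity ever matters:

#include <cstddef>
#include <functional>
#include <string>
#include <string_view>

// Illustrative analogue of ExternFuncID; the field types are assumptions.
struct FuncID {
  std::string name;
  std::string_view backend_id;
};

// XOR combination, as in the patch. XOR is symmetric: swapping the two
// field hashes yields the same result, which is acceptable for this use.
std::size_t HashXor(const FuncID &x) {
  return std::hash<std::string>{}(x.name) ^
         std::hash<std::string_view>{}(x.backend_id);
}

// Boost-style combiner: order-sensitive, in case asymmetry is ever needed.
std::size_t HashCombine(std::size_t seed, std::size_t value) {
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}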
diff --git a/paddle/cinn/backends/llvm/codegen_llvm.cc b/paddle/cinn/backends/llvm/codegen_llvm.cc
index 4c338ec69949fc..dbf9a46c1b369a 100644
--- a/paddle/cinn/backends/llvm/codegen_llvm.cc
+++ b/paddle/cinn/backends/llvm/codegen_llvm.cc
@@ -1511,7 +1511,7 @@ void CodeGenLLVM::InitTarget(const Target &target) {
 }
 
 void CodeGenLLVM::AddTbaaMetadata(llvm::Instruction *inst,
-                                  absl::string_view buffer,
+                                  std::string_view buffer,
                                   Expr index) {
   // If the index is constant, generate some TBAA info that helps LLVM
   // understand our loads/stores aren't aliased.
diff --git a/paddle/cinn/backends/llvm/codegen_llvm.h b/paddle/cinn/backends/llvm/codegen_llvm.h
index f1befe4429bec9..4285dd4e98e246 100644
--- a/paddle/cinn/backends/llvm/codegen_llvm.h
+++ b/paddle/cinn/backends/llvm/codegen_llvm.h
@@ -14,7 +14,6 @@
 
 #pragma once
 
-#include <absl/strings/string_view.h>
 #include
 #include
 #include
@@ -24,6 +23,7 @@
 #include
 #include
 #include
+#include <string_view>
 #include
 #include
 #include
@@ -254,7 +254,7 @@ class CodeGenLLVM : public LLVMIRVisitor, public IrBuilderMixin<CodeGenLLVM> {
    * can optimize by reordering loads and stores across different buffers.
    */
   void AddTbaaMetadata(llvm::Instruction *inst,
-                       absl::string_view buffer,
+                       std::string_view buffer,
                        Expr index);
 
   void InitTarget(const Target &target);
diff --git a/paddle/cinn/backends/llvm/execution_engine.cc b/paddle/cinn/backends/llvm/execution_engine.cc
index 91a32c283c77db..9d77f42240db7b 100644
--- a/paddle/cinn/backends/llvm/execution_engine.cc
+++ b/paddle/cinn/backends/llvm/execution_engine.cc
@@ -14,7 +14,6 @@
 
 #include "paddle/cinn/backends/llvm/execution_engine.h"
 
-#include <absl/strings/string_view.h>
 #include
 #include
 #include
@@ -52,6 +51,7 @@
 #include
 #include  // NOLINT
 #include
+#include <string_view>
 #include
 
 #include "paddle/cinn/backends/codegen_cuda_host.h"
@@ -265,7 +265,7 @@ void ExecutionEngine::ExportObject(const std::string &path) {
   fclose(of);
 }
 
-void *ExecutionEngine::Lookup(absl::string_view name) {
+void *ExecutionEngine::Lookup(std::string_view name) {
   utils::RecordEvent("ExecutionEngine Lookup", utils::EventType::kOrdinary);
   std::lock_guard<std::mutex> lock(mu_);
   if (auto symbol = jit_->lookup(AsStringRef(name))) {
diff --git a/paddle/cinn/backends/llvm/execution_engine.h b/paddle/cinn/backends/llvm/execution_engine.h
index cd5f2e4499e9d1..26fdf13bd51088 100644
--- a/paddle/cinn/backends/llvm/execution_engine.h
+++ b/paddle/cinn/backends/llvm/execution_engine.h
@@ -73,7 +73,7 @@ class ExecutionEngine {
   static std::unique_ptr<ExecutionEngine> Create(
       const ExecutionOptions &config);
 
-  void *Lookup(absl::string_view name);
+  void *Lookup(std::string_view name);
 
   template <typename CodeGenT>
   void Link(const ir::Module &module);
diff --git a/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py b/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
index 257ef3a7215f0c..87b41abf1daf09 100644
--- a/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
+++ b/paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
@@ -24,10 +24,10 @@ def main():
     llvm_config = sys.argv[3]
 
     srcs = []
-    srcs.append('#include <absl/strings/string_view.h>')
+    srcs.append('#include <string_view>')
     # srcs.append('#include "paddle/cinn/backends/llvm/cinn_runtime_llvm_ir.h"\n')
     srcs.append('namespace cinn::backends {')
-    srcs.append("static const absl::string_view kRuntimeLlvmIr(")
+    srcs.append("static const std::string_view kRuntimeLlvmIr(")
     srcs.append('R"ROC(')
     with open(path, 'r') as fr:
         srcs.append(fr.read())
diff --git a/paddle/cinn/backends/llvm/llvm_util.h b/paddle/cinn/backends/llvm/llvm_util.h
index fb65ae457f9305..19b2c5ddd66f8e 100644
--- a/paddle/cinn/backends/llvm/llvm_util.h
+++ b/paddle/cinn/backends/llvm/llvm_util.h
@@ -14,7 +14,6 @@
 
 #pragma once
 
-#include <absl/strings/string_view.h>
 #include
 #include
 #include
@@ -26,6 +25,7 @@
 #include
 #include
+#include <string_view>
 #include
 #include
 
@@ -45,7 +45,7 @@ std::string DumpToString(const T &entity) {
   //  return "\033[33m" + buffer + "\033[0m";  // Green
 }
 
-inline llvm::StringRef AsStringRef(absl::string_view str) {
+inline llvm::StringRef AsStringRef(std::string_view str) {
   return llvm::StringRef(str.data(), str.size());
 }
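AsStringRef in llvm_util.h works because llvm::StringRef can be built from a pointer/length pair, and neither absl::string_view nor std::string_view guarantees NUL-termination, so the length must be passed explicitly. A self-contained sketch with a minimal StringRef stand-in (the struct below is only for illustration, not LLVM's type):

#include <cstddef>
#include <iostream>
#include <string>
#include <string_view>

// Minimal stand-in for llvm::StringRef, just for this sketch.
struct StringRef {
  const char *data;
  std::size_t size;
};

// Mirrors AsStringRef above: pass data() and size() rather than assuming a
// terminating NUL.
inline StringRef AsStringRef(std::string_view str) {
  return StringRef{str.data(), str.size()};
}

int main() {
  std::string fn_name = "fn_reduce_sum";
  // substr of a string_view is a non-NUL-terminated slice of the original.
  StringRef ref = AsStringRef(std::string_view(fn_name).substr(0, 2));
  std::cout << std::string(ref.data, ref.size) << "\n";  // prints "fn"
}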
diff --git a/paddle/cinn/backends/llvm/runtime_symbol_registry.cc b/paddle/cinn/backends/llvm/runtime_symbol_registry.cc
index 2609b4fd4872aa..f96e4ea24407f3 100644
--- a/paddle/cinn/backends/llvm/runtime_symbol_registry.cc
+++ b/paddle/cinn/backends/llvm/runtime_symbol_registry.cc
@@ -14,10 +14,10 @@
 
 #include "paddle/cinn/backends/llvm/runtime_symbol_registry.h"
 
-#include <absl/strings/string_view.h>
 #include
 #include
 
+#include <string_view>
 #include "paddle/cinn/runtime/flags.h"
 #include "paddle/common/enforce.h"
@@ -32,7 +32,7 @@ RuntimeSymbols &GlobalSymbolRegistry::Global() {
   return symbols;
 }
 
-void *RuntimeSymbols::Lookup(absl::string_view name) const {
+void *RuntimeSymbols::Lookup(std::string_view name) const {
   std::lock_guard<std::mutex> lock(mu_);
   auto it = symbols_.find(std::string(name));
   if (it != symbols_.end()) {
diff --git a/paddle/cinn/backends/llvm/runtime_symbol_registry.h b/paddle/cinn/backends/llvm/runtime_symbol_registry.h
index bffdb451aa0487..373cb8eea803fc 100644
--- a/paddle/cinn/backends/llvm/runtime_symbol_registry.h
+++ b/paddle/cinn/backends/llvm/runtime_symbol_registry.h
@@ -14,14 +14,14 @@
 
 #pragma once
 
-#include
-#include
-#include
 #include
+#include
+#include
 #include
 #include  // NOLINT
 #include
+#include <string_view>
 #include
 
 #include "paddle/cinn/common/macros.h"
@@ -81,7 +81,7 @@ class RuntimeSymbols {
    * @param name Name of the symbol.
    * @return The address if exists, or nullptr will return.
    */
-  void *Lookup(absl::string_view name) const;
+  void *Lookup(std::string_view name) const;
 
   /**
    * Get all the symbols.
diff --git a/paddle/cinn/backends/llvm/simple_jit.h b/paddle/cinn/backends/llvm/simple_jit.h
old mode 100755
new mode 100644
index 5d70f98e556976..698813987d13fb
--- a/paddle/cinn/backends/llvm/simple_jit.h
+++ b/paddle/cinn/backends/llvm/simple_jit.h
@@ -14,7 +14,6 @@
 
 #pragma once
 
-#include <absl/strings/string_view.h>
 #include
 #include
 #include
@@ -36,6 +35,7 @@
 #include
 #include
+#include <string_view>
 #include
 #include
 
@@ -67,7 +67,7 @@ class SimpleJIT {
     llvm::cantFail(jit_->addIRModule(std::move(m)));
   }
 
-  llvm::JITTargetAddress Lookup(absl::string_view name) {
+  llvm::JITTargetAddress Lookup(std::string_view name) {
     return llvm::cantFail(jit_->lookup(AsStringRef(name))).getAddress();
   }
diff --git a/paddle/cinn/common/cinn_value.cc b/paddle/cinn/common/cinn_value.cc
index 996187d7d7964e..7ec97a9a321cc1 100644
--- a/paddle/cinn/common/cinn_value.cc
+++ b/paddle/cinn/common/cinn_value.cc
@@ -116,12 +116,12 @@ bool CINNValue::is_var() const { return type_code_ == TypeCode<ir::Var>(); }
 
 bool CINNValue::is_expr() const {
   return type_code_ == TypeCode<ir::Expr>() &&
-         !absl::any_cast<ir::Expr>(shared_).as_tensor();
+         !std::any_cast<ir::Expr>(shared_).as_tensor();
 }
 
 bool CINNValue::is_tensor() const {
   return type_code_ == TypeCode<ir::Expr>() &&
-         absl::any_cast<ir::Expr>(shared_).as_tensor();
+         std::any_cast<ir::Expr>(shared_).as_tensor();
 }
 
 CINNValue::operator std::string() const {
@@ -129,28 +129,28 @@ CINNValue::operator std::string() const {
       type_code_,
       TypeCode<std::string>(),
       ::common::errors::InvalidArgument("The type_code is not std::string."));
-  return absl::any_cast<std::string>(shared_);
+  return std::any_cast<std::string>(shared_);
 }
 
 CINNValue::operator ir::Var() const {
   PADDLE_ENFORCE_EQ(
       type_code_,
       TypeCode<ir::Var>(),
       ::common::errors::InvalidArgument("The type_code is not ir::Var."));
-  return absl::any_cast<ir::Var>(shared_);
+  return std::any_cast<ir::Var>(shared_);
 }
 
 CINNValue::operator ir::Expr() const {
   PADDLE_ENFORCE_EQ(
       type_code_,
       TypeCode<ir::Expr>(),
       ::common::errors::InvalidArgument("The type_code is not ir::Expr."));
-  return absl::any_cast<ir::Expr>(shared_);
+  return std::any_cast<ir::Expr>(shared_);
 }
 
 CINNValue::operator CINNValuePack() const {
   PADDLE_ENFORCE_EQ(
       type_code_,
       TypeCode<CINNValuePack>(),
       ::common::errors::InvalidArgument("The type_code is not CINNValuePack."));
-  return absl::any_cast<CINNValuePack>(shared_);
+  return std::any_cast<CINNValuePack>(shared_);
 }
 
 CINNValue::CINNValue(char *value)
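For the CINNValue conversions above, std::any_cast is a drop-in for absl::any_cast: an exact-type match returns the value, a mismatch throws std::bad_any_cast. A minimal sketch:

#include <any>
#include <iostream>
#include <string>

int main() {
  // CINNValue keeps its payload in a type-erased member (std::any shared_);
  // this shows the std::any_cast behavior the class now relies on.
  std::any shared = std::string("a tensor name");

  // Successful cast: the stored type matches exactly.
  std::cout << std::any_cast<std::string>(shared) << "\n";

  // Failed cast: std::any_cast throws std::bad_any_cast, the std counterpart
  // of absl::bad_any_cast.
  try {
    int v = std::any_cast<int>(shared);
    (void)v;
  } catch (const std::bad_any_cast &e) {
    std::cout << "wrong type: " << e.what() << "\n";
  }
}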
#pragma once
 
-#include <absl/types/any.h>
-#include <absl/types/variant.h>
+#include <any>
+#include <variant>
 #include
 
 #include "paddle/cinn/common/common.h"
@@ -234,7 +234,7 @@ class CINNValue : public cinn_pod_value_t {
   static int TypeCode();
 
  protected:
-  absl::any shared_;
+  std::any shared_;
 };
 
 }  // namespace common
diff --git a/paddle/cinn/common/common.h b/paddle/cinn/common/common.h
index 35fa728da8b4ef..5793acad06f26f 100644
--- a/paddle/cinn/common/common.h
+++ b/paddle/cinn/common/common.h
@@ -14,7 +14,7 @@
 
 #pragma once
 
-#include <absl/strings/string_view.h>
+#include <string_view>
 
 #include "paddle/cinn/common/axis.h"
 #include "paddle/cinn/common/cinn_value.h"
@@ -55,7 +55,7 @@ T& Reference(const T* x) {
   return *const_cast<T*>(x);
 }
 
-static void CheckVarNameValid(const absl::string_view name) {
+static void CheckVarNameValid(const std::string_view name) {
   PADDLE_ENFORCE_EQ(name.empty(),
                     false,
                     ::common::errors::InvalidArgument(
diff --git a/paddle/cinn/common/context.h b/paddle/cinn/common/context.h
index 12f4e8b3d2aa66..58f538d3495ffc 100644
--- a/paddle/cinn/common/context.h
+++ b/paddle/cinn/common/context.h
@@ -13,9 +13,9 @@
 // limitations under the License.
 
 #pragma once
 
-#include
-#include
+#include
+#include
 #include
 #include
 #include
diff --git a/paddle/cinn/common/debug_manager.cc b/paddle/cinn/common/debug_manager.cc
index aad6b48517481c..5c2dd753662a9d 100644
--- a/paddle/cinn/common/debug_manager.cc
+++ b/paddle/cinn/common/debug_manager.cc
@@ -17,9 +17,9 @@
 namespace cinn {
 namespace common {
 
-inline std::vector<std::pair<std::string, absl::any>> &GetVec(
-    absl::any &data) {  // NOLINT
-  return absl::any_cast<std::vector<std::pair<std::string, absl::any>> &>(data);
+inline std::vector<std::pair<std::string, std::any>> &GetVec(
+    std::any &data) {  // NOLINT
+  return std::any_cast<std::vector<std::pair<std::string, std::any>> &>(data);
 }
 
 //! AppendTypeSuffix for multiple types.
@@ -56,7 +56,7 @@ inline std::string DebugManager::AppendTypeSuffix(
 }
 // @}
 
-void DebugManager::Append(const std::string &key, absl::any value) {
+void DebugManager::Append(const std::string &key, std::any value) {
   GetVec(data_).push_back(std::make_pair(key, value));
 }
 
 void DebugManager::Append(const std::string &key, int32_t value) {
diff --git a/paddle/cinn/common/debug_manager.h b/paddle/cinn/common/debug_manager.h
index 001fdb1a61267a..9a0e23a9f76f73 100644
--- a/paddle/cinn/common/debug_manager.h
+++ b/paddle/cinn/common/debug_manager.h
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 #pragma once
 
-#include <absl/types/any.h>
+#include <any>
 #include
 #include
 
@@ -35,7 +35,7 @@ class DebugManager {
   void Clear();
 
  protected:
-  void Append(const std::string& key, absl::any value);
+  void Append(const std::string& key, std::any value);
 
   template <typename T>
   inline std::string AppendTypeSuffix(const std::string& key) {
@@ -44,7 +44,7 @@
 
  private:
   //! hide the type of vector<std::pair<std::string, std::any>>
-  absl::any data_;
+  std::any data_;
 };
 
 }  // namespace common
diff --git a/paddle/cinn/common/info_registry.h b/paddle/cinn/common/info_registry.h
index 74406bbbb8046d..d7103da77b5dc1 100644
--- a/paddle/cinn/common/info_registry.h
+++ b/paddle/cinn/common/info_registry.h
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 #pragma once
 
-#include <absl/types/any.h>
+#include <any>
 
 #include "paddle/utils/flat_hash_map.h"
@@ -33,7 +33,7 @@ class InfoRegistry {
   void Clear() { data_.clear(); }
 
  private:
-  paddle::flat_hash_map<std::string, absl::any> data_;
+  paddle::flat_hash_map<std::string, std::any> data_;
 };
 
 template <typename T>
@@ -42,7 +42,7 @@ T& InfoRegistry::Get(const std::string& key) {
   auto it = data_.find(key);
   if (it == data_.end()) {
     data_[key] = T();
   }
-  return absl::any_cast<T&>(data_[key]);
+  return std::any_cast<T&>(data_[key]);
 }
 
 }  // namespace common
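The InfoRegistry::Get<T> pattern above (default-construct on first access, then any_cast to a reference) ports directly to std::any. A usage sketch with std::unordered_map standing in for paddle::flat_hash_map:

#include <any>
#include <iostream>
#include <string>
#include <unordered_map>

// Sketch of the Get<T> pattern; not CINN's real class.
class InfoRegistry {
 public:
  template <typename T>
  T &Get(const std::string &key) {
    auto it = data_.find(key);
    if (it == data_.end()) {
      data_[key] = T();  // default-construct on first access
    }
    // any_cast to a reference: throws std::bad_any_cast on type mismatch.
    return std::any_cast<T &>(data_[key]);
  }

 private:
  std::unordered_map<std::string, std::any> data_;
};

int main() {
  InfoRegistry reg;
  reg.Get<int>("steps") += 3;
  std::cout << reg.Get<int>("steps") << "\n";  // prints 3
}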
diff --git a/paddle/cinn/hlir/framework/op.h b/paddle/cinn/hlir/framework/op.h
index c2c7786746556c..2ccdae411eb7bc 100644
--- a/paddle/cinn/hlir/framework/op.h
+++ b/paddle/cinn/hlir/framework/op.h
@@ -13,9 +13,9 @@
 // limitations under the License.
 
 #pragma once
 
-#include <absl/types/any.h>
-#include <absl/types/variant.h>
+#include <any>
+#include <variant>
 #include
 #include
 #include
@@ -71,7 +71,7 @@ enum OpPatternKind {
 
 struct OpRegistry : public Registry<Operator> {
   std::recursive_mutex mutex;
   std::atomic<int> op_counter{0};
-  paddle::flat_hash_map<std::string, std::unique_ptr<absl::any>> attrs;
+  paddle::flat_hash_map<std::string, std::unique_ptr<std::any>> attrs;
 
   static OpRegistry* Global() {
     static OpRegistry x;
@@ -146,14 +146,14 @@ class Operator {
   template <typename ValueType>
   inline Operator& set_attr(const std::string& attr_name,
                             const ValueType& value) {
-    UpdateAttrMap(attr_name, [this, attr_name, value](absl::any* pmap) {
+    UpdateAttrMap(attr_name, [this, attr_name, value](std::any* pmap) {
       if (!pmap->has_value()) {
         OpValueType<ValueType> pm;
         pm.attr_name = attr_name;
         *pmap = std::move(pm);
       }
       std::vector<ValueType>& vec =
-          absl::any_cast<OpValueType<ValueType>&>(*pmap).data;
+          std::any_cast<OpValueType<ValueType>&>(*pmap).data;
       // resize the value type.
       if (vec.size() <= index) {
         vec.resize(index + 1, ValueType());
       }
@@ -164,10 +164,10 @@ class Operator {
   }
 
   template <typename ValueType>
   static const OpValueType<ValueType>& GetAttrs(const std::string& attr_name) {
-    const absl::any* ref = GetAttrMap(attr_name);
+    const std::any* ref = GetAttrMap(attr_name);
     if (ref == nullptr) {
       //! update the attribute map of the key by creating new empty OpMap
-      UpdateAttrMap(attr_name, [attr_name](absl::any* pmap) {
+      UpdateAttrMap(attr_name, [attr_name](std::any* pmap) {
         if (!pmap->has_value()) {
           OpValueType<ValueType> pm;
           pm.attr_name = attr_name;
@@ -176,7 +176,7 @@
       });
       ref = GetAttrMap(attr_name);
     }
-    return absl::any_cast<const OpValueType<ValueType>&>(*ref);
+    return std::any_cast<const OpValueType<ValueType>&>(*ref);
   }
 
   auto get_index() const { return index; }
@@ -187,7 +187,7 @@
   friend class Registry<Operator>;
   uint32_t index{0};
   Operator() { index = OpRegistry::Global()->op_counter++; }
 
-  static const absl::any* GetAttrMap(const std::string& key) {
+  static const std::any* GetAttrMap(const std::string& key) {
     auto& dict = OpRegistry::Global()->attrs;
     auto it = dict.find(key);
     if (it != dict.end()) {
@@ -198,11 +198,11 @@
   }
 
   //! update the attribute OpValueType
   static void UpdateAttrMap(const std::string& key,
-                            std::function<void(absl::any*)> updater) {
+                            std::function<void(std::any*)> updater) {
     OpRegistry* reg = OpRegistry::Global();
     std::lock_guard<std::recursive_mutex>(reg->mutex);
-    std::unique_ptr<absl::any>& value = reg->attrs[key];
-    if (value.get() == nullptr) value.reset(new absl::any());
+    std::unique_ptr<std::any>& value = reg->attrs[key];
+    if (value.get() == nullptr) value.reset(new std::any());
     if (updater != nullptr) updater(value.get());
   }
 };
diff --git a/paddle/cinn/hlir/framework/op_strategy.cc b/paddle/cinn/hlir/framework/op_strategy.cc
index ccc1fe5d750377..7ffe84e5c0957a 100644
--- a/paddle/cinn/hlir/framework/op_strategy.cc
+++ b/paddle/cinn/hlir/framework/op_strategy.cc
@@ -56,7 +56,7 @@ std::ostream& operator<<(std::ostream& os, const NodeAttr& node_attr) {
   for (auto& item : node_attr.attr_store) {
     std::stringstream os;
     PyBindNodeAttrVisitor visitor(os);
-    absl::visit(visitor, item.second);
+    std::visit(visitor, item.second);
     ss << "- " << os.str() << "\n";
   }
   os << ss.str();
diff --git a/paddle/cinn/hlir/op/broadcast.cc b/paddle/cinn/hlir/op/broadcast.cc
index a864cf08ec796e..615b2daa62899a 100644
--- a/paddle/cinn/hlir/op/broadcast.cc
+++ b/paddle/cinn/hlir/op/broadcast.cc
@@ -112,7 +112,7 @@ std::shared_ptr<OpStrategy> StrategyForBroadcast(
   bool trans_a;
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "axis") {
-      axis = Expr(absl::get<int>(iter.second));
+      axis = Expr(std::get<int>(iter.second));
       break;
     }
   }
@@ -181,7 +181,7 @@ std::shared_ptr<OpStrategy> StrategyForBroadcastSymbolic(
   bool trans_a;
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "axis") {
-      axis = Expr(absl::get<int>(iter.second));
+      axis = Expr(std::get<int>(iter.second));
       break;
     }
   }
@@ -207,14 +207,14 @@ std::shared_ptr<OpStrategy> StrategyForBroadcastTo(
       1,
       ::common::errors::InvalidArgument(
           "The attrs.attr_store doesn't have the attribute of 'out_shape'."));
-  out_shape = absl::get<std::vector<int>>(attrs.attr_store.at("out_shape"));
+  out_shape = std::get<std::vector<int>>(attrs.attr_store.at("out_shape"));
   PADDLE_ENFORCE_GE(
       attrs.attr_store.count("broadcast_axes"),
       1,
       ::common::errors::InvalidArgument("The attrs.attr_store doesn't have the "
                                         "attribute of 'broadcast_axes'."));
   broadcast_axes =
-      absl::get<std::vector<int>>(attrs.attr_store.at("broadcast_axes"));
+      std::get<std::vector<int>>(attrs.attr_store.at("broadcast_axes"));
 
   VLOG(3) << "broadcast out shape: " << utils::Join(out_shape, ", ");
   VLOG(3) << "broadcast_axes shape: " << utils::Join(broadcast_axes, ", ");
diff --git a/paddle/cinn/hlir/op/contrib/cholesky.cc b/paddle/cinn/hlir/op/contrib/cholesky.cc
index 009fd4fd5f06f3..4bcc3b614cf4f3 100644
--- a/paddle/cinn/hlir/op/contrib/cholesky.cc
+++ b/paddle/cinn/hlir/op/contrib/cholesky.cc
@@ -17,7 +17,7 @@
 #include
 #include
 
-#include "absl/types/variant.h"
+#include <variant>
 #include "glog/logging.h"
 #include "paddle/cinn/common/cinn_value.h"
 #include "paddle/cinn/common/common.h"
diff --git a/paddle/cinn/hlir/op/contrib/gaussian_random.cc b/paddle/cinn/hlir/op/contrib/gaussian_random.cc
index 4697c6cec2acaa..2979eeae486cbe 100644
--- a/paddle/cinn/hlir/op/contrib/gaussian_random.cc
+++ b/paddle/cinn/hlir/op/contrib/gaussian_random.cc
@@ -17,7 +17,7 @@
 #include
 #include
 
-#include "absl/types/variant.h"
+#include <variant>
 #include "glog/logging.h"
 #include "paddle/cinn/common/cinn_value.h"
 #include "paddle/cinn/common/common.h"
diff --git a/paddle/cinn/hlir/op/contrib/lookup_table.cc b/paddle/cinn/hlir/op/contrib/lookup_table.cc
index 24777538a5a4c7..02b6d6c411a1b6 100644
--- a/paddle/cinn/hlir/op/contrib/lookup_table.cc
+++ b/paddle/cinn/hlir/op/contrib/lookup_table.cc
@@ -92,7 +92,7 @@ std::shared_ptr<OpStrategy> StrategyForLookupTable(
       true,
       ::common::errors::InvalidArgument(
           "The padding_idx should be set in lookup_table."));
-  auto padding_idx = absl::get<int64_t>(attr_store.at("padding_idx"));
+  auto padding_idx = std::get<int64_t>(attr_store.at("padding_idx"));
 
   framework::CINNCompute lookup_table_compute([=](lang::Args args,
                                                   lang::RetValue* ret) {
diff --git a/paddle/cinn/hlir/op/contrib/one_hot.cc b/paddle/cinn/hlir/op/contrib/one_hot.cc
index b33fff2ad533b0..8ce1bb953e349d 100644
--- a/paddle/cinn/hlir/op/contrib/one_hot.cc
+++ b/paddle/cinn/hlir/op/contrib/one_hot.cc
@@ -129,11 +129,11 @@ std::shared_ptr<OpStrategy> StrategyForOneHot(
 
   for (auto& iter : attrs.attr_store) {
     if (iter.first == "depth") {
-      depth = absl::get<int>(iter.second);
+      depth = std::get<int>(iter.second);
     } else if (iter.first == "axis") {
-      axis = absl::get<int>(iter.second);
+      axis = std::get<int>(iter.second);
     } else if (iter.first == "dtype") {
-      dtype = absl::get<std::string>(iter.second);
+      dtype = std::get<std::string>(iter.second);
     }
   }
diff --git a/paddle/cinn/hlir/op/contrib/randint.cc b/paddle/cinn/hlir/op/contrib/randint.cc
index ea678191f5ce3f..2a09e4f6cde64a 100644
--- a/paddle/cinn/hlir/op/contrib/randint.cc
+++ b/paddle/cinn/hlir/op/contrib/randint.cc
@@ -17,7 +17,7 @@
 #include
 #include
 
-#include "absl/types/variant.h"
+#include <variant>
 #include "glog/logging.h"
 #include "paddle/cinn/common/cinn_value.h"
 #include "paddle/cinn/common/common.h"
diff --git a/paddle/cinn/hlir/op/contrib/repeat.cc b/paddle/cinn/hlir/op/contrib/repeat.cc
index 1b42edb5603ca1..5dbba9557f3b15 100644
--- a/paddle/cinn/hlir/op/contrib/repeat.cc
+++ b/paddle/cinn/hlir/op/contrib/repeat.cc
@@ -106,9 +106,9 @@ std::shared_ptr<OpStrategy> StrategyForRepeat(
   int axis = 0;
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "repeats") {
-      repeats = absl::get<int>(iter.second);
+      repeats = std::get<int>(iter.second);
     } else if (iter.first == "axis") {
-      axis = absl::get<int>(iter.second);
+      axis = std::get<int>(iter.second);
     }
   }
   PADDLE_ENFORCE_GE(
diff --git a/paddle/cinn/hlir/op/contrib/sort.cc b/paddle/cinn/hlir/op/contrib/sort.cc
index 5e015dfc737e70..a77327b71ee90b 100644
--- a/paddle/cinn/hlir/op/contrib/sort.cc
+++ b/paddle/cinn/hlir/op/contrib/sort.cc
@@ -165,10 +165,10 @@ std::shared_ptr<OpStrategy> StrategyForSort(
       1,
       ::common::errors::InvalidArgument(
           "The attr_store doesn't have the attribute of 'axis'."));
-  int axis = absl::get<int>(attr_store.at("axis"));
+  int axis = std::get<int>(attr_store.at("axis"));
   bool is_ascend = true;
   if (attr_store.count("is_ascend")) {
-    is_ascend = absl::get<bool>(attr_store.at("is_ascend"));
+    is_ascend = std::get<bool>(attr_store.at("is_ascend"));
   }
 
   framework::CINNCompute sort_compute([=](lang::Args args,
@@ -235,10 +235,10 @@ std::shared_ptr<OpStrategy> StrategyForArgSort(
       1,
       ::common::errors::InvalidArgument(
           "The attr_store doesn't have the attribute of 'axis'."));
-  int axis = absl::get<int>(attr_store.at("axis"));
+  int axis = std::get<int>(attr_store.at("axis"));
   bool is_ascend = true;
   if (attr_store.count("is_ascend")) {
-    is_ascend = absl::get<bool>(attr_store.at("is_ascend"));
+    is_ascend = std::get<bool>(attr_store.at("is_ascend"));
   }
 
   framework::CINNCompute argsort_compute([=](lang::Args args,
diff --git a/paddle/cinn/hlir/op/contrib/uniform_random.cc b/paddle/cinn/hlir/op/contrib/uniform_random.cc
index faa0d40104d1e6..e668511dada0bf 100644
--- a/paddle/cinn/hlir/op/contrib/uniform_random.cc
+++ b/paddle/cinn/hlir/op/contrib/uniform_random.cc
@@ -17,7 +17,7 @@
 #include
 #include
 
-#include "absl/types/variant.h"
+#include <variant>
 #include "glog/logging.h"
 #include "paddle/cinn/common/cinn_value.h"
 #include "paddle/cinn/common/common.h"
diff --git a/paddle/cinn/hlir/op/custom_call.cc b/paddle/cinn/hlir/op/custom_call.cc
index bb9114031d1c33..cd32cd0a793f2a 100644
--- a/paddle/cinn/hlir/op/custom_call.cc
+++ b/paddle/cinn/hlir/op/custom_call.cc
@@ -184,28 +184,28 @@ std::vector<ir::Expr> CustomCallArgsForCublas(
   const auto &attr_store = attrs.attr_store;
   bool trans_a = attr_store.count("trans_a")
-                     ? absl::get<bool>(attr_store.at("trans_a"))
+                     ? std::get<bool>(attr_store.at("trans_a"))
                      : false;
   bool trans_b = attr_store.count("trans_b")
-                     ? absl::get<bool>(attr_store.at("trans_b"))
+                     ? std::get<bool>(attr_store.at("trans_b"))
                      : false;
   bool trans_out = attr_store.count("trans_out")
-                       ? absl::get<bool>(attr_store.at("trans_out"))
+                       ? std::get<bool>(attr_store.at("trans_out"))
                        : false;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
   int x_num_col_dims = attr_store.count("x_num_col_dims")
-                           ? absl::get<int>(attr_store.at("x_num_col_dims"))
+                           ? std::get<int>(attr_store.at("x_num_col_dims"))
                            : 0;
   int y_num_col_dims = attr_store.count("y_num_col_dims")
-                           ? absl::get<int>(attr_store.at("y_num_col_dims"))
+                           ? std::get<int>(attr_store.at("y_num_col_dims"))
                            : 0;
   bool is_infer = attr_store.count("is_infer")
-                      ? absl::get<bool>(attr_store.at("is_infer"))
+                      ? std::get<bool>(attr_store.at("is_infer"))
                       : false;
   PADDLE_ENFORCE_EQ(
       (x_num_col_dims == 0 && y_num_col_dims == 0) ||
@@ -357,28 +357,28 @@ std::vector<ir::Expr> CustomCallArgsForBatchedCublas(
   const auto &attr_store = attrs.attr_store;
   bool trans_a = attr_store.count("trans_a")
-                     ? absl::get<bool>(attr_store.at("trans_a"))
+                     ? std::get<bool>(attr_store.at("trans_a"))
                      : false;
   bool trans_b = attr_store.count("trans_b")
-                     ? absl::get<bool>(attr_store.at("trans_b"))
+                     ? std::get<bool>(attr_store.at("trans_b"))
                      : false;
   bool trans_out = attr_store.count("trans_out")
-                       ? absl::get<bool>(attr_store.at("trans_out"))
+                       ? std::get<bool>(attr_store.at("trans_out"))
                        : false;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
   int x_num_col_dims = attr_store.count("x_num_col_dims")
-                           ? absl::get<int>(attr_store.at("x_num_col_dims"))
+                           ? std::get<int>(attr_store.at("x_num_col_dims"))
                            : 0;
   int y_num_col_dims = attr_store.count("y_num_col_dims")
-                           ? absl::get<int>(attr_store.at("y_num_col_dims"))
+                           ? std::get<int>(attr_store.at("y_num_col_dims"))
                            : 0;
   bool is_infer = attr_store.count("is_infer")
-                      ? absl::get<bool>(attr_store.at("is_infer"))
+                      ? std::get<bool>(attr_store.at("is_infer"))
                       : false;
   PADDLE_ENFORCE_EQ((x_num_col_dims == 0 && y_num_col_dims == 0) ||
                         (x_num_col_dims > 0 && y_num_col_dims > 0),
@@ -402,7 +402,7 @@ std::vector<ir::Expr> CustomCallArgsForBatchedCublas(
                         "x_num_col_dims = %d, y_num_col_dims = %d.",
                         x_num_col_dims,
                         y_num_col_dims));
-  if (absl::get<std::string>(attr_store.at("side")) == "left") {
+  if (std::get<std::string>(attr_store.at("side")) == "left") {
     left = inputs[0];
     right = inputs[1];
   } else {
@@ -511,8 +511,8 @@ std::vector<ir::Expr> CustomCallArgsForBatchedCublas(
   // func args
   std::vector<ir::Expr> args = {
-      absl::get<std::string>(attr_store.at("side")) == "left" ? ir::Expr(0)
-                                                              : ir::Expr(1),
+      std::get<std::string>(attr_store.at("side")) == "left" ? ir::Expr(0)
+                                                             : ir::Expr(1),
       ir::Expr(trans_a),
       ir::Expr(trans_b),
       ir::Expr(trans_out),
@@ -543,36 +543,36 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvForward(
                         output_shapes.size())); */
   const auto &attr_store = attrs.attr_store;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
 
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
       ::common::errors::NotFound(
           "The CudnnConvForward custom_call must has attribute \"padding\""));
-  auto padding = absl::get<std::vector<int>>(attr_store.at("padding"));
+  auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
       true,
       ::common::errors::NotFound(
           "The CudnnConvForward custom_call must has attribute \"stride\""));
-  auto stride = absl::get<std::vector<int>>(attr_store.at("stride"));
+  auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
-                      ? absl::get<std::vector<int>>(attr_store.at("dilation"))
+                      ? std::get<std::vector<int>>(attr_store.at("dilation"))
                       : std::vector<int>({1, 1});
   std::string data_format =
       attr_store.count("data_format")
-          ? absl::get<std::string>(attr_store.at("data_format"))
+          ? std::get<std::string>(attr_store.at("data_format"))
           : "NCHW";
   if (data_format == "AnyLayout") {
     data_format = "NCHW";
   }
   int groups =
-      attr_store.count("groups") ? absl::get<int>(attr_store.at("groups")) : 1;
+      attr_store.count("groups") ? std::get<int>(attr_store.at("groups")) : 1;
   cudnnTensorFormat_t format =
       data_format == "NCHW" ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
@@ -626,36 +626,36 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvBackwardData(
   const auto &attr_store = attrs.attr_store;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
 
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
       ::common::errors::NotFound("The CudnnConvBackwardData custom_call"
                                  "must has attribute \"padding\""));
-  auto padding = absl::get<std::vector<int>>(attr_store.at("padding"));
+  auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
       true,
       ::common::errors::NotFound("The CudnnConvBackwardData custom_call"
                                  "must has attribute \"stride\""));
-  auto stride = absl::get<std::vector<int>>(attr_store.at("stride"));
+  auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
-                      ? absl::get<std::vector<int>>(attr_store.at("dilation"))
+                      ? std::get<std::vector<int>>(attr_store.at("dilation"))
                       : std::vector<int>({1, 1});
   std::string data_format =
       attr_store.count("data_format")
-          ? absl::get<std::string>(attr_store.at("data_format"))
+          ? std::get<std::string>(attr_store.at("data_format"))
          : "NCHW";
   if (data_format == "AnyLayout") {
     data_format = "NCHW";
   }
   int groups =
-      attr_store.count("groups") ? absl::get<int>(attr_store.at("groups")) : 1;
+      attr_store.count("groups") ? std::get<int>(attr_store.at("groups")) : 1;
   cudnnTensorFormat_t format =
       data_format == "NCHW" ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
@@ -708,36 +708,36 @@ std::vector<ir::Expr> CustomCallArgsForCudnnConvBackwardFilter(
   const auto &attr_store = attrs.attr_store;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
 
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding"),
       true,
       ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call"
                                  "must has attribute \"padding\""));
-  auto padding = absl::get<std::vector<int>>(attr_store.at("padding"));
+  auto padding = std::get<std::vector<int>>(attr_store.at("padding"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride"),
      true,
       ::common::errors::NotFound("The CudnnConvBackwardFilter custom_call"
                                  "must has attribute \"stride\""));
-  auto stride = absl::get<std::vector<int>>(attr_store.at("stride"));
+  auto stride = std::get<std::vector<int>>(attr_store.at("stride"));
   auto dilation = attr_store.count("dilation")
-                      ? absl::get<std::vector<int>>(attr_store.at("dilation"))
+                      ? std::get<std::vector<int>>(attr_store.at("dilation"))
                       : std::vector<int>({1, 1});
   std::string data_format =
       attr_store.count("data_format")
-          ? absl::get<std::string>(attr_store.at("data_format"))
+          ? std::get<std::string>(attr_store.at("data_format"))
          : "NCHW";
   if (data_format == "AnyLayout") {
     data_format = "NCHW";
   }
   int groups =
-      attr_store.count("groups") ? absl::get<int>(attr_store.at("groups")) : 1;
+      attr_store.count("groups") ? std::get<int>(attr_store.at("groups")) : 1;
   cudnnTensorFormat_t format =
       data_format == "NCHW" ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC;
@@ -791,45 +791,44 @@ std::vector<ir::Expr> CustomCallArgsForCudnnPoolForward(
   const auto &attr_store = attrs.attr_store;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
 
   PADDLE_ENFORCE_EQ(
       attr_store.count("kernel_size"),
       true,
       ::common::errors::NotFound("The CudnnPoolForward custom_call"
                                  "must has attribute \"kernel_size\""));
-  auto kernel = absl::get<std::vector<int>>(attr_store.at("kernel_size"));
+  auto kernel = std::get<std::vector<int>>(attr_store.at("kernel_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding_size"),
       true,
       ::common::errors::NotFound("The CudnnPoolForward custom_call"
                                  "must has attribute \"padding_size\""));
-  auto padding = absl::get<std::vector<int>>(attr_store.at("padding_size"));
+  auto padding = std::get<std::vector<int>>(attr_store.at("padding_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride_size"),
       true,
       ::common::errors::NotFound("The CudnnPoolForward custom_call"
                                  "must has attribute \"stride_size\""));
-  auto stride = absl::get<std::vector<int>>(attr_store.at("stride_size"));
+  auto stride = std::get<std::vector<int>>(attr_store.at("stride_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("pool_type"),
       true,
       ::common::errors::NotFound("The CudnnPoolForward custom_call"
                                  "must has attribute \"pool_type\""));
-  auto pool_type = absl::get<std::string>(attr_store.at("pool_type"));
+  auto pool_type = std::get<std::string>(attr_store.at("pool_type"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("data_format"),
       true,
       ::common::errors::NotFound("The CudnnPoolForward custom_call"
                                  "must has attribute \"data_format\""));
-  std::string data_format =
-      absl::get<std::string>(attr_store.at("data_format"));
+  std::string data_format = std::get<std::string>(attr_store.at("data_format"));
   bool exclusive = attr_store.count("exclusive")
-                       ? absl::get<bool>(attrs.attr_store.at("exclusive"))
+                       ? std::get<bool>(attrs.attr_store.at("exclusive"))
                        : true;
   cudnnPoolingMode_t mode = pool_type == "max"
@@ -886,45 +885,45 @@ std::vector<ir::Expr> CustomCallArgsForCudnnPoolBackward(
   const auto &attr_store = attrs.attr_store;
   float alpha = attr_store.count("alpha")
-                    ? absl::get<float>(attr_store.at("alpha"))
+                    ? std::get<float>(attr_store.at("alpha"))
                     : 1.0f;
   float beta =
-      attr_store.count("beta") ? absl::get<float>(attr_store.at("beta")) : 0.0f;
+      attr_store.count("beta") ? std::get<float>(attr_store.at("beta")) : 0.0f;
 
   PADDLE_ENFORCE_EQ(
       attr_store.count("kernel_size"),
       true,
       ::common::errors::NotFound("The CudnnPoolBackward custom_call"
                                  "must has attribute \"kernel_size\""));
-  auto kernel = absl::get<std::vector<int>>(attr_store.at("kernel_size"));
+  auto kernel = std::get<std::vector<int>>(attr_store.at("kernel_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("padding_size"),
       true,
       ::common::errors::NotFound("The CudnnPoolBackward custom_call"
                                  "must has attribute \"padding_size\""));
-  auto padding = absl::get<std::vector<int>>(attr_store.at("padding_size"));
+  auto padding = std::get<std::vector<int>>(attr_store.at("padding_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("stride_size"),
       true,
       ::common::errors::NotFound("The CudnnPoolBackward custom_call"
                                  "must has attribute \"stride_size\""));
-  auto stride = absl::get<std::vector<int>>(attr_store.at("stride_size"));
+  auto stride = std::get<std::vector<int>>(attr_store.at("stride_size"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("pool_type"),
       true,
       ::common::errors::NotFound("The CudnnPoolBackward custom_call"
                                  "must has attribute \"pool_type\""));
-  auto pool_type = absl::get<std::string>(attr_store.at("pool_type"));
+  auto pool_type = std::get<std::string>(attr_store.at("pool_type"));
   PADDLE_ENFORCE_EQ(
       attr_store.count("data_format"),
       true,
       ::common::errors::NotFound("The CudnnPoolBackward custom_call"
                                  "must has attribute \"data_format\""));
   std::string data_format =
-      absl::get<std::string>(attrs.attr_store.at("data_format"));
+      std::get<std::string>(attrs.attr_store.at("data_format"));
   bool exclusive = attr_store.count("exclusive")
-                       ? absl::get<bool>(attrs.attr_store.at("exclusive"))
+                       ? std::get<bool>(attrs.attr_store.at("exclusive"))
                        : true;
   cudnnPoolingMode_t mode = pool_type == "max"
@@ -985,9 +984,9 @@ std::vector<ir::Expr> CustomCallArgsForAssertTrue(
           "The assert_true custom_call must has attribute \"msg\""));
   // TODO(thisjiang): change type from 'int' to 'std::string' when custom call
   // support 'std::string' type
-  int msg = absl::get<int>(attr_store.at("msg"));
+  int msg = std::get<int>(attr_store.at("msg"));
   bool only_warning = attr_store.count("only_warning")
-                          ? absl::get<bool>(attrs.attr_store.at("only_warning"))
+                          ? std::get<bool>(attrs.attr_store.at("only_warning"))
                           : false;
 
   std::vector<ir::Expr> args = {ir::Expr(msg), ir::Expr(only_warning)};
@@ -1009,14 +1008,13 @@ std::vector<ir::Expr> CustomCallArgsForGaussianRandom(
   const auto &attr_store = attrs.attr_store;
 
   float mean = attr_store.count("mean")
-                   ? absl::get<float>(attrs.attr_store.at("mean"))
+                   ? std::get<float>(attrs.attr_store.at("mean"))
                    : 0.0f;
   float std = attr_store.count("std")
-                  ? absl::get<float>(attrs.attr_store.at("std"))
+                  ? std::get<float>(attrs.attr_store.at("std"))
                   : 1.0f;
-  int seed = attr_store.count("seed")
-                 ? absl::get<int>(attrs.attr_store.at("seed"))
-                 : 0;
+  int seed =
+      attr_store.count("seed") ? std::get<int>(attrs.attr_store.at("seed")) : 0;
 
   std::vector<ir::Expr> args = {ir::Expr(mean), ir::Expr(std), ir::Expr(seed)};
@@ -1037,14 +1035,13 @@ std::vector<ir::Expr> CustomCallArgsForUniformRandom(
   const auto &attr_store = attrs.attr_store;
 
   float min = attr_store.count("min")
-                  ? absl::get<float>(attrs.attr_store.at("min"))
+                  ? std::get<float>(attrs.attr_store.at("min"))
                   : -1.0f;
   float max = attr_store.count("max")
-                  ? absl::get<float>(attrs.attr_store.at("max"))
+                  ? std::get<float>(attrs.attr_store.at("max"))
                   : 1.0f;
-  int seed = attr_store.count("seed")
-                 ? absl::get<int>(attrs.attr_store.at("seed"))
-                 : 0;
+  int seed =
+      attr_store.count("seed") ? std::get<int>(attrs.attr_store.at("seed")) : 0;
 
   PADDLE_ENFORCE_GE(max,
                     min,
@@ -1072,9 +1069,8 @@ std::vector<ir::Expr> CustomCallArgsForRandInt(
   const auto &attr_store = attrs.attr_store;
 
-  int seed = attr_store.count("seed")
-                 ? absl::get<int>(attrs.attr_store.at("seed"))
-                 : 0;
+  int seed =
+      attr_store.count("seed") ? std::get<int>(attrs.attr_store.at("seed")) : 0;
 
   std::vector<ir::Expr> args = {ir::Expr(seed)};
@@ -1106,7 +1102,7 @@ std::vector<ir::Expr> CustomCallArgsForCholesky(
   }
   int m = x->shape[ndim - 1].as_int32();
 
-  auto upper = absl::get<bool>(attrs.attr_store.at("upper"));
+  auto upper = std::get<bool>(attrs.attr_store.at("upper"));
 
   std::vector<ir::Expr> args = {
       ir::Expr(batch_size), ir::Expr(m), ir::Expr(upper)};
@@ -1154,10 +1150,10 @@ std::vector<ir::Expr> CustomCallArgsForTriangularSolve(
     batch_size *= a->shape[i].as_int32();
   }
 
-  auto left_side = absl::get<bool>(attrs.attr_store.at("left_side"));
-  auto upper = absl::get<bool>(attrs.attr_store.at("upper"));
-  auto transpose_a = absl::get<bool>(attrs.attr_store.at("transpose_a"));
-  auto unit_diagonal = absl::get<bool>(attrs.attr_store.at("unit_diagonal"));
+  auto left_side = std::get<bool>(attrs.attr_store.at("left_side"));
+  auto upper = std::get<bool>(attrs.attr_store.at("upper"));
+  auto transpose_a = std::get<bool>(attrs.attr_store.at("transpose_a"));
+  auto unit_diagonal = std::get<bool>(attrs.attr_store.at("unit_diagonal"));
 
   int m = a->shape[a_ndim - 1].as_int32();
   int k = left_side ? b->shape[b_ndim - 1].as_int32()
@@ -1228,7 +1224,7 @@ std::vector<ir::Expr> CustomCallArgsForMemset(
   int value = 0;
   const auto &value_attr = attr_store.at("value");
-  absl::visit(Visitor(&value), value_attr);
+  std::visit(Visitor(&value), value_attr);
   // can support memset non-0 ?
   PADDLE_ENFORCE_EQ(
       value,
@@ -1242,7 +1238,7 @@
   }
 
   const auto &dtype =
-      cinn::common::Str2Type(absl::get<std::string>(attr_store.at("dtype")));
+      cinn::common::Str2Type(std::get<std::string>(attr_store.at("dtype")));
   count *= dtype.bytes();
 
   VLOG(4) << "call memset custom_call with value="
           << utils::Attribute2String(value_attr) << " (" << value
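The Visitor passed to std::visit in CustomCallArgsForMemset above dispatches on whichever alternative the attribute variant currently holds. A simplified sketch of that pattern (the Attribute alias and Visitor below are stand-ins, not CINN's definitions):

#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

// Simplified stand-in for CINN's attribute variant.
using Attribute = std::variant<int, int64_t, float, std::string>;

struct Visitor {
  explicit Visitor(int *v) : v_(v) {}
  // Exact overloads win over the template for integral alternatives.
  void operator()(int x) const { *v_ = x; }
  void operator()(int64_t x) const { *v_ = static_cast<int>(x); }
  // Fallback for non-integral alternatives.
  template <typename T>
  void operator()(const T &) const {
    std::cerr << "memset value must be integral\n";
  }
  int *v_;
};

int main() {
  Attribute value_attr = 0;
  int value = -1;
  std::visit(Visitor(&value), value_attr);  // dispatches to the int overload
  std::cout << value << "\n";               // prints 0
}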
diff --git a/paddle/cinn/hlir/op/elementwise.cc b/paddle/cinn/hlir/op/elementwise.cc
index d9b5cd73872b67..46b07876d9f811 100644
--- a/paddle/cinn/hlir/op/elementwise.cc
+++ b/paddle/cinn/hlir/op/elementwise.cc
@@ -16,7 +16,7 @@
 
 #include
 
-#include "absl/types/optional.h"
+#include <optional>
 #include "paddle/cinn/adt/op_equation_context.h"
 #include "paddle/cinn/common/type.h"
 #include "paddle/cinn/hlir/dialect/operator/ir/symbol_bindings.h"
@@ -187,11 +187,11 @@ std::shared_ptr<OpStrategy> StrategyForScale(
   bool bias_after_scale = true;
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "scale") {
-      scale = absl::get<float>(iter.second);
+      scale = std::get<float>(iter.second);
     } else if (iter.first == "bias") {
-      bias = absl::get<float>(iter.second);
+      bias = std::get<float>(iter.second);
     } else if (iter.first == "bias_after_scale") {
-      bias_after_scale = absl::get<bool>(iter.second);
+      bias_after_scale = std::get<bool>(iter.second);
     }
   }
   framework::CINNCompute scale_compute([=](lang::Args args,
@@ -272,11 +272,11 @@ std::shared_ptr<OpStrategy> StrategyForScaleSymbolic(
   bool bias_after_scale = true;
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "scale") {
-      scale = absl::get<float>(iter.second);
+      scale = std::get<float>(iter.second);
     } else if (iter.first == "bias") {
-      bias = absl::get<float>(iter.second);
+      bias = std::get<float>(iter.second);
     } else if (iter.first == "bias_after_scale") {
-      bias_after_scale = absl::get<bool>(iter.second);
+      bias_after_scale = std::get<bool>(iter.second);
     }
   }
   framework::CINNCompute scale_compute(
@@ -404,7 +404,7 @@ Expr GetScalarExpr(const framework::NodeAttr::attr_t &attr) {
           "wrong type std::vector"));
     }
   };
-  absl::visit(Visitor{scalar}, attr);
+  std::visit(Visitor{scalar}, attr);
   return scalar;
 }
 
@@ -491,7 +491,7 @@ std::shared_ptr<OpStrategy> StrategyForFillConstant(
       ::common::errors::InvalidArgument(
           "The attribute shape of fill_constant is not found! "
          "Please check."));
-  auto shape = absl::get<std::vector<int>>(attrs.attr_store.at("shape"));
+  auto shape = std::get<std::vector<int>>(attrs.attr_store.at("shape"));
   PADDLE_ENFORCE_EQ(attrs.attr_store.count("value"),
                     true,
                     ::common::errors::InvalidArgument(
@@ -504,7 +504,7 @@
       ::common::errors::InvalidArgument(
           "The attribute force_cpu of fill_constant is not found! "
          "Please check."));
-  force_cpu = absl::get<bool>(attrs.attr_store.at("force_cpu"));
+  force_cpu = std::get<bool>(attrs.attr_store.at("force_cpu"));
 
   if (force_cpu && target != cinn::common::DefaultHostTarget()) {
     LOG(WARNING) << "The attribute force_cpu of fill_constant "
@@ -653,15 +653,15 @@ std::shared_ptr<OpStrategy> StrategyForAssignValue(
           "Please check."));
     std::string tensor_name = arg_pack[0].operator std::string();
 
-    absl::optional<ir::Tensor> out;
-#define EXPAND_VALUE_TO_TENSOR(TYPE)                                          \
-  else if (absl::get_if<TYPE>(&value)) { /*NOLINT*/                          \
-    out = pe::AssignValue(                                                   \
-        std::vector<TYPE>{absl::get<TYPE>(value)}, out_type[0], tensor_name); \
-  }                                                                          \
-  else if (absl::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/             \
-    out = pe::AssignValue(                                                   \
-        absl::get<std::vector<TYPE>>(value), out_type[0], tensor_name);      \
+    std::optional<ir::Tensor> out;
+#define EXPAND_VALUE_TO_TENSOR(TYPE)                                         \
+  else if (std::get_if<TYPE>(&value)) { /*NOLINT*/                           \
+    out = pe::AssignValue(                                                   \
+        std::vector<TYPE>{std::get<TYPE>(value)}, out_type[0], tensor_name); \
+  }                                                                          \
+  else if (std::get_if<std::vector<TYPE>>(&value)) { /*NOLINT*/              \
+    out = pe::AssignValue(                                                   \
+        std::get<std::vector<TYPE>>(value), out_type[0], tensor_name);       \
   }
 
     if (false) {  // NOLINT
@@ -739,7 +739,7 @@ std::shared_ptr<OpStrategy> StrategyForSqueeze(
     const Target &target) {
   const std::vector<int> &axes =
       attrs.attr_store.count("axes")
-          ? absl::get<std::vector<int>>(attrs.attr_store.at("axes"))
+          ? std::get<std::vector<int>>(attrs.attr_store.at("axes"))
          : std::vector<int>{};
 
   framework::CINNCompute squeeze_compute([=](lang::Args args,
@@ -800,7 +800,7 @@ std::shared_ptr<OpStrategy> StrategyForExpandDims(
     const Target &target) {
   const std::vector<int> &axes =
       attrs.attr_store.count("axes")
-          ? absl::get<std::vector<int>>(attrs.attr_store.at("axes"))
+          ? std::get<std::vector<int>>(attrs.attr_store.at("axes"))
          : std::vector<int>{};
 
   framework::CINNCompute expand_dims_compute{[=](lang::Args args,
@@ -881,7 +881,7 @@ std::shared_ptr<OpStrategy> StrategyForReshape(
   PADDLE_ENFORCE(attr_store.count("shape"),
                  ::common::errors::InvalidArgument("find no attr of shape"));
   std::vector<int> new_shape =
-      absl::get<std::vector<int>>(attr_store.at("shape"));
+      std::get<std::vector<int>>(attr_store.at("shape"));
   auto tensor_A = A.as_tensor_ref();
   VLOG(3) << "A shape: " << utils::Join(tensor_A->shape, ", ")
           << ", output_shapes: " << utils::Join(output_shapes[0], ", ");
@@ -1208,9 +1208,9 @@ std::shared_ptr<OpStrategy> StrategyForGenerateShapeSymbolic(
       attrs.attr_store.count("symbol_bindings"),
       ::common::errors::InvalidArgument("Expected attribute symbol_bindings "
                                         "in strategy for generate shape op"));
-  auto output_dim_exprs = absl::get<std::vector<symbol::DimExpr>>(
+  auto output_dim_exprs = std::get<std::vector<symbol::DimExpr>>(
       attrs.attr_store.at("output_dim_exprs"));
-  auto symbol_bindings = absl::get<cinn::dialect::SymbolBindings>(
+  auto symbol_bindings = std::get<cinn::dialect::SymbolBindings>(
       attrs.attr_store.at("symbol_bindings"));
 
   framework::CINNCompute generate_shape_compute(
@@ -1276,11 +1276,11 @@ std::shared_ptr<OpStrategy> StrategyForArange(
       ::common::errors::InvalidArgument(
           "No dtype attribute in attrs.attr_store! Please check."));
 
-  auto start = absl::get<float>(attr_store.at("start"));
-  auto stop = absl::get<float>(attr_store.at("stop"));
-  auto step = absl::get<float>(attr_store.at("step"));
+  auto start = std::get<float>(attr_store.at("start"));
+  auto stop = std::get<float>(attr_store.at("stop"));
+  auto step = std::get<float>(attr_store.at("step"));
   auto dtype =
-      cinn::common::Str2Type(absl::get<std::string>(attr_store.at("dtype")));
+      cinn::common::Str2Type(std::get<std::string>(attr_store.at("dtype")));
 
   framework::CINNCompute arange_compute(
       [=](lang::Args args, lang::RetValue *ret) {
@@ -1333,11 +1333,11 @@ std::shared_ptr<OpStrategy> StrategyForArangeSymbolic(
       ::common::errors::InvalidArgument(
           "No dtype attribute in arange Op! Please check."));
 
-  auto start = absl::get<float>(attr_store.at("start"));
-  auto stop = absl::get<float>(attr_store.at("stop"));
-  auto step = absl::get<float>(attr_store.at("step"));
+  auto start = std::get<float>(attr_store.at("start"));
+  auto stop = std::get<float>(attr_store.at("stop"));
+  auto step = std::get<float>(attr_store.at("step"));
   auto dtype =
-      cinn::common::Str2Type(absl::get<std::string>(attr_store.at("dtype")));
+      cinn::common::Str2Type(std::get<std::string>(attr_store.at("dtype")));
 
   framework::CINNCompute arange_compute([=](lang::Args args,
                                             lang::RetValue *ret) {
@@ -1388,7 +1388,7 @@ std::shared_ptr<OpStrategy> StrategyForTril(
                  A.as_tensor(),
                  ::common::errors::InvalidArgument(
                      "first input argument in tril should be tensor"));
-  int diagonal = absl::get<int>(attrs.attr_store.at("diagonal"));
+  int diagonal = std::get<int>(attrs.attr_store.at("diagonal"));
   auto tensor_A = A.as_tensor_ref();
 
   PADDLE_ENFORCE_NE(output_shapes.size(),
@@ -1497,16 +1497,16 @@ std::shared_ptr<OpStrategy> StrategyForIsClose(
   int axis = -1;
 
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }
   if (attrs.attr_store.count("rtol")) {
-    rtol = absl::get<float>(attrs.attr_store.at("rtol"));
+    rtol = std::get<float>(attrs.attr_store.at("rtol"));
   }
   if (attrs.attr_store.count("atol")) {
-    atol = absl::get<float>(attrs.attr_store.at("atol"));
+    atol = std::get<float>(attrs.attr_store.at("atol"));
   }
   if (attrs.attr_store.count("equal_nan")) {
-    equal_nan = absl::get<bool>(attrs.attr_store.at("equal_nan"));
+    equal_nan = std::get<bool>(attrs.attr_store.at("equal_nan"));
   }
 
   framework::CINNCompute isclose_compute([=](lang::Args args,
@@ -1567,16 +1567,16 @@ std::shared_ptr<OpStrategy> StrategyForIsCloseSymbolic(
   int axis = -1;
 
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }
   if (attrs.attr_store.count("rtol")) {
-    rtol = absl::get<float>(attrs.attr_store.at("rtol"));
+    rtol = std::get<float>(attrs.attr_store.at("rtol"));
   }
   if (attrs.attr_store.count("atol")) {
-    atol = absl::get<float>(attrs.attr_store.at("atol"));
+    atol = std::get<float>(attrs.attr_store.at("atol"));
   }
   if (attrs.attr_store.count("equal_nan")) {
-    equal_nan = absl::get<bool>(attrs.attr_store.at("equal_nan"));
+    equal_nan = std::get<bool>(attrs.attr_store.at("equal_nan"));
   }
 
   framework::CINNCompute isclose_compute([=](lang::Args args,
std::get>(attrs.attr_store.at("stride")); } if (attrs.attr_store.find("dilation") != attrs.attr_store.end()) { - dilation = absl::get>(attrs.attr_store.at("dilation")); + dilation = std::get>(attrs.attr_store.at("dilation")); } if (attrs.attr_store.find("data_format") != attrs.attr_store.end()) { - data_format = absl::get(attrs.attr_store.at("data_format")); + data_format = std::get(attrs.attr_store.at("data_format")); } if (attrs.attr_store.find("groups") != attrs.attr_store.end()) { - groups = absl::get(attrs.attr_store.at("groups")); + groups = std::get(attrs.attr_store.at("groups")); } if (attrs.attr_store.find("use_onednn") != attrs.attr_store.end()) { - use_onednn = absl::get(attrs.attr_store.at("use_onednn")); + use_onednn = std::get(attrs.attr_store.at("use_onednn")); } if (attrs.attr_store.find("key") != attrs.attr_store.end()) { - key = absl::get(attrs.attr_store.at("key")); + key = std::get(attrs.attr_store.at("key")); } // get conv type if (attrs.attr_store.find("conv_type") != attrs.attr_store.end()) { - conv_type = absl::get(attrs.attr_store.at("conv_type")); + conv_type = std::get(attrs.attr_store.at("conv_type")); } else { conv_type = "forward"; } @@ -451,19 +451,19 @@ std::shared_ptr StrategyForDepthwiseConv2d( std::string data_format = "NCHW"; std::string key; if (attrs.attr_store.find("padding") != attrs.attr_store.end()) { - padding = absl::get>(attrs.attr_store.at("padding")); + padding = std::get>(attrs.attr_store.at("padding")); } if (attrs.attr_store.find("stride") != attrs.attr_store.end()) { - stride = absl::get>(attrs.attr_store.at("stride")); + stride = std::get>(attrs.attr_store.at("stride")); } if (attrs.attr_store.find("data_format") != attrs.attr_store.end()) { - data_format = absl::get(attrs.attr_store.at("data_format")); + data_format = std::get(attrs.attr_store.at("data_format")); } if (attrs.attr_store.find("dilation") != attrs.attr_store.end()) { - dilation = absl::get>(attrs.attr_store.at("dilation")); + dilation = std::get>(attrs.attr_store.at("dilation")); } if (attrs.attr_store.find("key") != attrs.attr_store.end()) { - key = absl::get(attrs.attr_store.at("key")); + key = std::get(attrs.attr_store.at("key")); } framework::CINNCompute depthwise_conv2d_compute([=](lang::Args args, @@ -594,10 +594,10 @@ std::shared_ptr StrategyForBatchNorm( float epsilon = 0.00001f; std::vector input_layouts; if (attrs.attr_store.find("epsilon") != attrs.attr_store.end()) { - epsilon = absl::get(attrs.attr_store.at("epsilon")); + epsilon = std::get(attrs.attr_store.at("epsilon")); } if (attrs.attr_store.find("input_layouts") != attrs.attr_store.end()) { - input_layouts = absl::get>( + input_layouts = std::get>( attrs.attr_store.at("input_layouts")); } framework::CINNCompute batchnorm_compute([=](lang::Args args, @@ -741,19 +741,19 @@ std::shared_ptr StrategyForPool1d( std::string data_format = "NCW"; for (auto &iter : attrs.attr_store) { if (iter.first == "kernel_size") { - kernel_size = absl::get>(iter.second); + kernel_size = std::get>(iter.second); } else if (iter.first == "stride_size") { - stride_size = absl::get>(iter.second); + stride_size = std::get>(iter.second); } else if (iter.first == "padding_size") { - padding_size = absl::get>(iter.second); + padding_size = std::get>(iter.second); } else if (iter.first == "pool_type") { - pool_type = absl::get(iter.second); + pool_type = std::get(iter.second); } else if (iter.first == "ceil_mode") { - ceil_mode = absl::get(iter.second); + ceil_mode = std::get(iter.second); } else if (iter.first == "exclusive") { - 
@@ -741,19 +741,19 @@ std::shared_ptr<OpStrategy> StrategyForPool1d(
   std::string data_format = "NCW";
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "kernel_size") {
-      kernel_size = absl::get<std::vector<int>>(iter.second);
+      kernel_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "stride_size") {
-      stride_size = absl::get<std::vector<int>>(iter.second);
+      stride_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "padding_size") {
-      padding_size = absl::get<std::vector<int>>(iter.second);
+      padding_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "pool_type") {
-      pool_type = absl::get<std::string>(iter.second);
+      pool_type = std::get<std::string>(iter.second);
     } else if (iter.first == "ceil_mode") {
-      ceil_mode = absl::get<bool>(iter.second);
+      ceil_mode = std::get<bool>(iter.second);
     } else if (iter.first == "exclusive") {
-      exclusive = absl::get<bool>(iter.second);
+      exclusive = std::get<bool>(iter.second);
     } else if (iter.first == "data_format") {
-      data_format = absl::get<std::string>(iter.second);
+      data_format = std::get<std::string>(iter.second);
     } else {
       LOG(ERROR) << "Unsupported attr: " << iter.first << std::endl;
     }
   }
@@ -832,39 +832,39 @@ std::shared_ptr<OpStrategy> StrategyForPool2d(
   std::string data_format = "NCHW";
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "kernel_size") {
-      kernel_size = absl::get<std::vector<int>>(iter.second);
+      kernel_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "stride_size") {
-      stride_size = absl::get<std::vector<int>>(iter.second);
+      stride_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "padding_size") {
-      padding_size = absl::get<std::vector<int>>(iter.second);
+      padding_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "pool_type") {
-      pool_type = absl::get<std::string>(iter.second);
+      pool_type = std::get<std::string>(iter.second);
     } else if (iter.first == "ceil_mode") {
-      ceil_mode = absl::get<bool>(iter.second);
+      ceil_mode = std::get<bool>(iter.second);
     } else if (iter.first == "exclusive") {
-      exclusive = absl::get<bool>(iter.second);
+      exclusive = std::get<bool>(iter.second);
     } else if (iter.first == "data_format") {
-      data_format = absl::get<std::string>(iter.second);
+      data_format = std::get<std::string>(iter.second);
     } else if (iter.first == "global_pooling") {
-      global_pooling = absl::get<bool>(iter.second);
+      global_pooling = std::get<bool>(iter.second);
     } else if (iter.first == "adaptive") {
-      adaptive = absl::get<bool>(iter.second);
+      adaptive = std::get<bool>(iter.second);
     }
   }
 
   // It can be removed after fixing the global_pool2d problem
   if (attr_store.count("origin_kernel_size")) {
     kernel_size =
-        absl::get<std::vector<int>>(attr_store.at("origin_kernel_size"));
+        std::get<std::vector<int>>(attr_store.at("origin_kernel_size"));
   }
   if (attr_store.count("origin_padding_size")) {
     padding_size =
-        absl::get<std::vector<int>>(attr_store.at("origin_padding_size"));
+        std::get<std::vector<int>>(attr_store.at("origin_padding_size"));
   }
   if (attr_store.count("origin_global_pooling")) {
-    global_pooling = absl::get<bool>(attr_store.at("origin_global_pooling"));
+    global_pooling = std::get<bool>(attr_store.at("origin_global_pooling"));
   }
   if (attr_store.count("origin_adaptive")) {
-    adaptive = absl::get<bool>(attr_store.at("origin_adaptive"));
+    adaptive = std::get<bool>(attr_store.at("origin_adaptive"));
   }
 
   PADDLE_ENFORCE_EQ(kernel_size.empty(),
@@ -1064,19 +1064,19 @@ std::shared_ptr<OpStrategy> StrategyForPool3d(
   std::string data_format = "NCDHW";
   for (auto &iter : attrs.attr_store) {
     if (iter.first == "kernel_size") {
-      kernel_size = absl::get<std::vector<int>>(iter.second);
+      kernel_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "stride_size") {
-      stride_size = absl::get<std::vector<int>>(iter.second);
+      stride_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "padding_size") {
-      padding_size = absl::get<std::vector<int>>(iter.second);
+      padding_size = std::get<std::vector<int>>(iter.second);
     } else if (iter.first == "pool_type") {
-      pool_type = absl::get<std::string>(iter.second);
+      pool_type = std::get<std::string>(iter.second);
     } else if (iter.first == "ceil_mode") {
-      ceil_mode = absl::get<bool>(iter.second);
+      ceil_mode = std::get<bool>(iter.second);
     } else if (iter.first == "exclusive") {
-      exclusive = absl::get<bool>(iter.second);
+      exclusive = std::get<bool>(iter.second);
     } else if (iter.first == "data_format") {
-      data_format = absl::get<std::string>(iter.second);
+      data_format = std::get<std::string>(iter.second);
     } else {
       LOG(ERROR) << "Unsupported attr: " << iter.first << std::endl;
     }
   }
@@ -1147,10 +1147,10 @@ std::shared_ptr<OpStrategy> StrategyForSoftmax(
   int axis = -1;
   bool use_onednn = false;
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }
   if (attrs.attr_store.count("use_onednn")) {
-    use_onednn = absl::get<bool>(attrs.attr_store.at("use_onednn"));
+    use_onednn = std::get<bool>(attrs.attr_store.at("use_onednn"));
   }
   framework::CINNCompute softmax_compute([=](lang::Args args,
                                              lang::RetValue *ret) {
(attrs.attr_store.count("use_onednn")) { - use_onednn = absl::get(attrs.attr_store.at("use_onednn")); + use_onednn = std::get(attrs.attr_store.at("use_onednn")); } framework::CINNCompute softmax_compute([=](lang::Args args, lang::RetValue *ret) { @@ -1231,12 +1231,12 @@ std::shared_ptr StrategyForDropoutInfer( float dropout_prob = 0; std::string dropout_implementation = "downgrade_in_infer"; if (attrs.attr_store.find("dropout_prob") != attrs.attr_store.end()) { - dropout_prob = absl::get(attrs.attr_store.at("dropout_prob")); + dropout_prob = std::get(attrs.attr_store.at("dropout_prob")); } if (attrs.attr_store.find("dropout_implementation") != attrs.attr_store.end()) { dropout_implementation = - absl::get(attrs.attr_store.at("dropout_implementation")); + std::get(attrs.attr_store.at("dropout_implementation")); } framework::CINNCompute dropout_infer_compute([=](lang::Args args, diff --git a/paddle/cinn/hlir/op/op_util.h b/paddle/cinn/hlir/op/op_util.h index 91f63ac8aec17c..9a18342cfe8ada 100644 --- a/paddle/cinn/hlir/op/op_util.h +++ b/paddle/cinn/hlir/op/op_util.h @@ -36,11 +36,11 @@ T GetAttr(const cinn::utils::AttributeMap &attr_map, "Sorry, cannot found attribute %s", attr_name)); const auto &attr = attr_map.at(attr_name); PADDLE_ENFORCE_EQ( - absl::holds_alternative(attr), + std::holds_alternative(attr), true, ::common::errors::InvalidArgument( "The type of attribute %s isn't %s", attr_name, typeid(T).name())); - return absl::get(attr_map.at(attr_name)); + return std::get(attr_map.at(attr_name)); } template diff --git a/paddle/cinn/hlir/op/reduction.cc b/paddle/cinn/hlir/op/reduction.cc index a563f3ff2d92aa..edc540937636c2 100644 --- a/paddle/cinn/hlir/op/reduction.cc +++ b/paddle/cinn/hlir/op/reduction.cc @@ -60,15 +60,15 @@ std::shared_ptr StrategyForReduceSymbolic( auto ndim = inputs[0]->shape.size(); if (attrs.attr_store.count("axis")) { reduce_axes = [&] { - if (absl::holds_alternative>( + if (std::holds_alternative>( attrs.attr_store.at("axis"))) { const auto &dim_attr = - absl::get>(attrs.attr_store.at("axis")); + std::get>(attrs.attr_store.at("axis")); return std::vector(dim_attr.begin(), dim_attr.end()); - } else if (absl::holds_alternative>( + } else if (std::holds_alternative>( attrs.attr_store.at("axis"))) { - return absl::get>(attrs.attr_store.at("axis")); - } else if (absl::holds_alternative(attrs.attr_store.at("axis"))) { + return std::get>(attrs.attr_store.at("axis")); + } else if (std::holds_alternative(attrs.attr_store.at("axis"))) { return std::vector{}; } else { PADDLE_THROW(::common::errors::InvalidArgument( @@ -113,7 +113,7 @@ std::shared_ptr StrategyForReduceSymbolic( bool keepdim = false; if (attrs.attr_store.count("keepdim")) { - keepdim = absl::get(attrs.attr_store.at("keepdim")); + keepdim = std::get(attrs.attr_store.at("keepdim")); } framework::CINNCompute reduction_compute([=](lang::Args args, diff --git a/paddle/cinn/hlir/op/transform.cc b/paddle/cinn/hlir/op/transform.cc index 8943ec7a35b7e4..b402d76972dfc2 100644 --- a/paddle/cinn/hlir/op/transform.cc +++ b/paddle/cinn/hlir/op/transform.cc @@ -160,10 +160,10 @@ std::shared_ptr StrategyForSplit( int axis = 0; if (attrs.attr_store.find("num_or_sections") != attrs.attr_store.end()) { sections = - absl::get>(attrs.attr_store.at("num_or_sections")); + std::get>(attrs.attr_store.at("num_or_sections")); } if (attrs.attr_store.find("axis") != attrs.attr_store.end()) { - axis = absl::get(attrs.attr_store.at("axis")); + axis = std::get(attrs.attr_store.at("axis")); } if (axis < 0) axis += 
diff --git a/paddle/cinn/hlir/op/transform.cc b/paddle/cinn/hlir/op/transform.cc
index 8943ec7a35b7e4..b402d76972dfc2 100644
--- a/paddle/cinn/hlir/op/transform.cc
+++ b/paddle/cinn/hlir/op/transform.cc
@@ -160,10 +160,10 @@ std::shared_ptr<OpStrategy> StrategyForSplit(
   int axis = 0;
   if (attrs.attr_store.find("num_or_sections") != attrs.attr_store.end()) {
     sections =
-        absl::get<std::vector<int>>(attrs.attr_store.at("num_or_sections"));
+        std::get<std::vector<int>>(attrs.attr_store.at("num_or_sections"));
   }
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }
   if (axis < 0) axis += static_cast<int>(output_shapes[0].size());
@@ -268,7 +268,7 @@ std::shared_ptr<OpStrategy> StrategyForConcat(
         "The output_shapes of Concat is empty! Please check.\n"));
   int axis = 0;
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }

   std::vector<ir::Tensor> input_tensors;
@@ -336,7 +336,7 @@ std::shared_ptr<OpStrategy> StrategyForConcatSymbolic(
         "The output_shapes of Concat is empty! Please check."));
   int axis = 0;
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }

   std::vector<ir::Tensor> input_tensors;
@@ -546,10 +546,10 @@ std::shared_ptr<OpStrategy> StrategyForLayoutTransform(
   std::string src_layout;
   std::string dst_layout;
   if (attrs.attr_store.find("src_layout") != attrs.attr_store.end()) {
-    src_layout = absl::get<std::string>(attrs.attr_store.at("src_layout"));
+    src_layout = std::get<std::string>(attrs.attr_store.at("src_layout"));
   }
   if (attrs.attr_store.find("dst_layout") != attrs.attr_store.end()) {
-    dst_layout = absl::get<std::string>(attrs.attr_store.at("dst_layout"));
+    dst_layout = std::get<std::string>(attrs.attr_store.at("dst_layout"));
   }
   PADDLE_ENFORCE_EQ(!src_layout.empty(),
                     true,
@@ -612,7 +612,7 @@ std::shared_ptr<OpStrategy> StrategyForReverse(
   // get axis[0, n_dim)
   std::vector<int> axis;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<std::vector<int>>(attrs.attr_store.at("axis"));
+    axis = std::get<std::vector<int>>(attrs.attr_store.at("axis"));
     for (auto &e : axis) {
       if (e >= static_cast<int>(output_shapes[0].size()) ||
           e < -1 * static_cast<int>(output_shapes[0].size())) {
@@ -685,7 +685,7 @@ std::shared_ptr<OpStrategy> StrategyForReverseSymbolic(
   // get axis[0, n_dim)
   std::vector<int> axis;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<std::vector<int>>(attrs.attr_store.at("axis"));
+    axis = std::get<std::vector<int>>(attrs.attr_store.at("axis"));
     for (auto &e : axis) {
       if (e >= static_cast<int>(output_shapes[0].size()) ||
           e < -1 * static_cast<int>(output_shapes[0].size())) {
@@ -756,7 +756,7 @@ std::shared_ptr<OpStrategy> StrategyForTranspose(
   std::vector<int> axis;
   auto input_shape = inputs[0]->shape;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<std::vector<int>>(attrs.attr_store.at("axis"));
+    axis = std::get<std::vector<int>>(attrs.attr_store.at("axis"));
     PADDLE_ENFORCE_EQ(
         axis.size(),
         output_shapes[0].size(),
@@ -856,7 +856,7 @@ std::shared_ptr<OpStrategy> StrategyForTransposeSymbolic(
   std::vector<int> axis;
   auto input_shape = inputs[0]->shape;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<std::vector<int>>(attrs.attr_store.at("axis"));
+    axis = std::get<std::vector<int>>(attrs.attr_store.at("axis"));
     PADDLE_ENFORCE_LE(axis.size(),
                       output_shapes[0].size(),
                       ::common::errors::InvalidArgument(
@@ -933,7 +933,7 @@ std::shared_ptr<OpStrategy> StrategyForGather(
   int axis = 0;
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }
   axis = axis < 0 ? axis + static_cast<int>(inputs[0]->shape.size()) : axis;
@@ -1025,7 +1025,7 @@ std::shared_ptr<OpStrategy> StrategyForGatherSymbolic(
   int axis = 0;
   if (attrs.attr_store.count("axis")) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }
   axis = axis < 0 ? axis + static_cast<int>(inputs[0]->shape.size()) : axis;
@@ -1080,7 +1080,7 @@ std::shared_ptr<OpStrategy> StrategyForScatterAssign(
     const Target &target) {
   int axis = 0;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }

   framework::CINNCompute scatter_assign_compute([=](lang::Args args,
@@ -1165,7 +1165,7 @@ std::shared_ptr<OpStrategy> StrategyForScatterAdd(
     const Target &target) {
   int axis = 0;
   if (attrs.attr_store.find("axis") != attrs.attr_store.end()) {
-    axis = absl::get<int>(attrs.attr_store.at("axis"));
+    axis = std::get<int>(attrs.attr_store.at("axis"));
   }

   framework::CINNCompute scatter_add_compute([=](lang::Args args,
@@ -1248,20 +1248,20 @@ std::shared_ptr<OpStrategy> StrategyForSlice(
     const Target &target) {
   std::vector<int> starts, ends, axes, strides, decrease_axis;
   if (attrs.attr_store.find("starts") != attrs.attr_store.end()) {
-    starts = absl::get<std::vector<int>>(attrs.attr_store.at("starts"));
+    starts = std::get<std::vector<int>>(attrs.attr_store.at("starts"));
   }
   if (attrs.attr_store.find("ends") != attrs.attr_store.end()) {
-    ends = absl::get<std::vector<int>>(attrs.attr_store.at("ends"));
+    ends = std::get<std::vector<int>>(attrs.attr_store.at("ends"));
   }
   if (attrs.attr_store.find("axes") != attrs.attr_store.end()) {
-    axes = absl::get<std::vector<int>>(attrs.attr_store.at("axes"));
+    axes = std::get<std::vector<int>>(attrs.attr_store.at("axes"));
   }
   if (attrs.attr_store.find("strides") != attrs.attr_store.end()) {
-    strides = absl::get<std::vector<int>>(attrs.attr_store.at("strides"));
+    strides = std::get<std::vector<int>>(attrs.attr_store.at("strides"));
   }
   if (attrs.attr_store.find("decrease_axis") != attrs.attr_store.end()) {
     decrease_axis =
-        absl::get<std::vector<int>>(attrs.attr_store.at("decrease_axis"));
+        std::get<std::vector<int>>(attrs.attr_store.at("decrease_axis"));
   }

   PADDLE_ENFORCE_EQ(!starts.empty(),
@@ -1364,13 +1364,13 @@ std::shared_ptr<OpStrategy> StrategyForSlice(

 template <typename T>
 std::vector<T> GetIntVectorFromAttr(const utils::Attribute &attr) {
-  if (absl::holds_alternative<std::vector<int64_t>>(attr)) {
-    const auto &attr_data = absl::get<std::vector<int64_t>>(attr);
+  if (std::holds_alternative<std::vector<int64_t>>(attr)) {
+    const auto &attr_data = std::get<std::vector<int64_t>>(attr);
     return std::vector<T>(attr_data.begin(), attr_data.end());
-  } else if (absl::holds_alternative<std::vector<int>>(attr)) {
-    const auto &attr_data = absl::get<std::vector<int>>(attr);
+  } else if (std::holds_alternative<std::vector<int>>(attr)) {
+    const auto &attr_data = std::get<std::vector<int>>(attr);
     return std::vector<T>(attr_data.begin(), attr_data.end());
-  } else if (absl::holds_alternative<bool>(attr)) {
+  } else if (std::holds_alternative<bool>(attr)) {
     return std::vector<T>{};
   } else {
     PADDLE_THROW(::common::errors::InvalidArgument(
@@ -1573,16 +1573,16 @@ std::shared_ptr<OpStrategy> StrategyForSliceAssign(

   std::vector<int> starts, ends, axes, strides;
   if (attrs.attr_store.find("starts") != attrs.attr_store.end()) {
-    starts = absl::get<std::vector<int>>(attrs.attr_store.at("starts"));
+    starts = std::get<std::vector<int>>(attrs.attr_store.at("starts"));
   }
   if (attrs.attr_store.find("ends") != attrs.attr_store.end()) {
-    ends = absl::get<std::vector<int>>(attrs.attr_store.at("ends"));
+    ends = std::get<std::vector<int>>(attrs.attr_store.at("ends"));
   }
   if (attrs.attr_store.find("axes") != attrs.attr_store.end()) {
-    axes = absl::get<std::vector<int>>(attrs.attr_store.at("axes"));
+    axes = std::get<std::vector<int>>(attrs.attr_store.at("axes"));
   }
   if (attrs.attr_store.find("strides") != attrs.attr_store.end()) {
-    strides = absl::get<std::vector<int>>(attrs.attr_store.at("strides"));
+    strides = std::get<std::vector<int>>(attrs.attr_store.at("strides"));
   }

   PADDLE_ENFORCE_EQ(
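For reference, a self-contained sketch (NormalizeAxis is a hypothetical helper name) of the negative-axis normalization that Split, Gather, and friends apply above: an axis in [-rank, rank) is mapped onto [0, rank).

// NormalizeAxis(-1, 4) == 3; NormalizeAxis(2, 4) == 2.
int NormalizeAxis(int axis, int rank) {
  return axis < 0 ? axis + rank : axis;
}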
diff --git a/paddle/cinn/ir/ir.cc b/paddle/cinn/ir/ir.cc
index 40fe094f060c79..5b52c0da61d356 100644
--- a/paddle/cinn/ir/ir.cc
+++ b/paddle/cinn/ir/ir.cc
@@ -124,7 +124,7 @@ IndexExpr Add::Make(IndexExpr a, IndexExpr b) {
   return IndexExpr(node);
 }

-void BinaryNodeVerify(const Expr &a, const Expr &b, absl::string_view ir_name) {
+void BinaryNodeVerify(const Expr &a, const Expr &b, std::string_view ir_name) {
   PADDLE_ENFORCE_EQ(
       a.defined(),
       true,
diff --git a/paddle/cinn/ir/ir.h b/paddle/cinn/ir/ir.h
index ec35aea4be0ea0..5136f8badb473e 100644
--- a/paddle/cinn/ir/ir.h
+++ b/paddle/cinn/ir/ir.h
@@ -17,12 +17,12 @@
  */
 #pragma once
-#include <absl/types/variant.h>
 #include <map>
 #include <memory>
 #include <string>
 #include <tuple>
 #include <utility>
+#include <variant>
 #include <vector>

 #include "paddle/common/enforce.h"
@@ -49,7 +49,7 @@ using cinn::common::Object;
 using cinn::common::Shared;
 // NOTE attr_t only support POD, can not contain Expr or other IR nodes, or the
 // IRVisitor or IRCopy on PrimitiveNode will result in undefined behavior.
-using attr_t = absl::variant<int, float, bool, std::string>;
+using attr_t = std::variant<int, float, bool, std::string>;

 /**
  * Cast a node to another type, can't change the width.
diff --git a/paddle/cinn/ir/ir_printer.cc b/paddle/cinn/ir/ir_printer.cc
index aff074a2b852ae..b5ee36402e47d8 100644
--- a/paddle/cinn/ir/ir_printer.cc
+++ b/paddle/cinn/ir/ir_printer.cc
@@ -735,7 +735,7 @@ void IrPrinter::Visit(const ScheduleBlockRealize *x) {
     if (comma) str_ += ", ";
     str_ += kv.first;
     str_ += ":";
-    absl::visit(
+    std::visit(
         [this](auto &&arg) {
           std::ostringstream ss;
           ss << arg;
@@ -997,7 +997,7 @@ void IrPrinter::VisitStmt(const stmt::Schedule &stmt) {
     if (comma) str_ += ", ";
     str_ += kv.first;
     str_ += ":";
-    absl::visit(
+    std::visit(
         [this](auto &&arg) {
           std::ostringstream ss;
           ss << arg;
diff --git a/paddle/cinn/ir/op/ir_operators.cc b/paddle/cinn/ir/op/ir_operators.cc
index 44b36f7e51bb95..2d3c0f43d8d16f 100644
--- a/paddle/cinn/ir/op/ir_operators.cc
+++ b/paddle/cinn/ir/op/ir_operators.cc
@@ -28,7 +28,7 @@
 namespace cinn {
 namespace ir {

-using attr_t = absl::variant<int, float, bool, std::string>;
+using attr_t = std::variant<int, float, bool, std::string>;

 Expr operator<<(Expr a, Expr b) {
   PADDLE_ENFORCE_EQ(a.type().is_int() || a.type().is_uint(),
diff --git a/paddle/cinn/ir/registry.h b/paddle/cinn/ir/registry.h
index 1413ae6dbf70a3..769762a8f4e079 100644
--- a/paddle/cinn/ir/registry.h
+++ b/paddle/cinn/ir/registry.h
@@ -14,7 +14,7 @@
 #pragma once
-#include <absl/strings/string_view.h>
+#include <string_view>
 #include <mutex>
 #include <string>
diff --git a/paddle/cinn/ir/schedule/ir_schedule.cc b/paddle/cinn/ir/schedule/ir_schedule.cc
index 80a87ec05df02b..68984a36d32108 100644
--- a/paddle/cinn/ir/schedule/ir_schedule.cc
+++ b/paddle/cinn/ir/schedule/ir_schedule.cc
@@ -628,14 +628,14 @@ void IRSchedule::Annotate(const Expr& block,
                           const std::string& key,
                           const attr_t& value) {
   impl_->Annotate(block, key, value);

-#define TRACE_ANNOTATE_ITEM(data_type, step_name)               \
-  if (absl::holds_alternative<data_type>(value)) {              \
-    trace_.Append(ScheduleDesc::Step(                           \
-        #step_name,                                             \
-        {{"block", std::vector<Expr>({block})}},                \
-        {{"key", key}, {"value", absl::get<data_type>(value)}}, \
-        {}));                                                   \
-    return;                                                     \
+#define TRACE_ANNOTATE_ITEM(data_type, step_name)               \
+  if (std::holds_alternative<data_type>(value)) {               \
+    trace_.Append(ScheduleDesc::Step(                           \
+        #step_name,                                             \
+        {{"block", std::vector<Expr>({block})}},                \
+        {{"key", key}, {"value", std::get<data_type>(value)}},  \
+        {}));                                                   \
+    return;                                                     \
   }
   TRACE_ANNOTATE_ITEM(int, AnnotateIntAttr)
   TRACE_ANNOTATE_ITEM(bool, AnnotateBoolAttr)
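A sketch (not part of the patch) of the std::visit idiom the printer and schedule changes above use: a generic lambda streams whichever alternative is currently active. The attr_t alias mirrors the one in paddle/cinn/ir/ir.h; AttrToString is an illustrative name.

#include <sstream>
#include <string>
#include <variant>

using attr_t = std::variant<int, float, bool, std::string>;

std::string AttrToString(const attr_t &value) {
  std::ostringstream ss;
  // The generic lambda is instantiated once per variant alternative.
  std::visit([&ss](auto &&arg) { ss << arg; }, value);
  return ss.str();
}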
diff --git a/paddle/cinn/ir/schedule/schedule_desc.cc b/paddle/cinn/ir/schedule/schedule_desc.cc
index 0003b8232d7ee6..dca8ed62f4e448 100644
--- a/paddle/cinn/ir/schedule/schedule_desc.cc
+++ b/paddle/cinn/ir/schedule/schedule_desc.cc
@@ -123,8 +123,8 @@ class PackedStepContext {
   template <typename AttrType>
   const AttrType& AttrAt(size_t idx) const {
     try {
-      return absl::get<AttrType>(attrs_.at(idx));
-    } catch (absl::bad_variant_access& ex) {
+      return std::get<AttrType>(attrs_.at(idx));
+    } catch (std::bad_variant_access& ex) {
       std::stringstream ss;
       ss << "Attribute cast error, idx:" << idx
          << ", get type:" << typeid(AttrType).name()
@@ -585,13 +585,13 @@ void AttrVariantToProto(const utils::Attribute& attr,
 #define SET_DESC_SINGLE_ITEM(index, built_type, proto_type, proto_field)   \
   case index:                                                              \
     attr_proto->set_dtype(proto::ScheduleDesc_Attr_DataType_##proto_type); \
-    attr_proto->set_##proto_field(absl::get<built_type>(attr));            \
+    attr_proto->set_##proto_field(std::get<built_type>(attr));             \
     break;

 #define SET_DESC_REPEATED_ITEM(index, built_type, proto_type, proto_field) \
   case index: {                                                            \
     attr_proto->set_dtype(proto::ScheduleDesc_Attr_DataType_##proto_type); \
-    const auto& values = absl::get<built_type>(attr);                      \
+    const auto& values = std::get<built_type>(attr);                       \
     attr_proto->mutable_##proto_field()->Reserve(values.size());           \
     *attr_proto->mutable_##proto_field() = {values.begin(), values.end()}; \
     break;                                                                 \
diff --git a/paddle/cinn/ir/tensor.cc b/paddle/cinn/ir/tensor.cc
index 2022472a7b8ab0..9e264c0e6dd45a 100644
--- a/paddle/cinn/ir/tensor.cc
+++ b/paddle/cinn/ir/tensor.cc
@@ -515,7 +515,7 @@ bool _Tensor_::is_tuple_get() const {
          operation->as<ir::CallOp>()->is_tuple_get;
 }

-bool _Tensor_::IsDependOnStatement(absl::string_view statement) {
+bool _Tensor_::IsDependOnStatement(std::string_view statement) {
   if (!is_compute_node()) {
     return false;
   }
diff --git a/paddle/cinn/ir/tensor.h b/paddle/cinn/ir/tensor.h
index d7fa81977ccc7e..7ad5e74bd8e455 100644
--- a/paddle/cinn/ir/tensor.h
+++ b/paddle/cinn/ir/tensor.h
@@ -14,7 +14,6 @@
 #pragma once
-#include <absl/strings/string_view.h>
 #include <map>
 #include <memory>
@@ -22,6 +21,7 @@
 #include <set>
 #include <string>
+#include <string_view>
 #include <tuple>
 #include <utility>
 #include <vector>
@@ -207,7 +207,7 @@ class _Tensor_ : public ExprNode<_Tensor_> {
    * @param statement The name of a statement(equivalent to the id of tensor).
    * @return A boolean.
    */
-  bool IsDependOnStatement(absl::string_view statement);
+  bool IsDependOnStatement(std::string_view statement);

   /**
    * Get the names of the tensors those this tensor depends on.
diff --git a/paddle/cinn/lang/compute.h b/paddle/cinn/lang/compute.h
index 258ed4acb7b644..9b03e95a4e77fb 100755
--- a/paddle/cinn/lang/compute.h
+++ b/paddle/cinn/lang/compute.h
@@ -13,7 +13,7 @@
 // limitations under the License.
 #pragma once
-#include <absl/types/variant.h>
+#include <variant>
 #include <functional>
 #include <map>
@@ -30,7 +30,7 @@
 namespace cinn {
 namespace lang {

 using compute_handler_t = std::function<Expr(const std::vector<Expr> &)>;
-using attr_t = absl::variant<int, float, bool, std::string>;
+using attr_t = std::variant<int, float, bool, std::string>;

 //! Compute methods for one to five Vars as arguments.
 // @{
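The error-handling pattern behind PackedStepContext::AttrAt above, as a standalone sketch: std::get throws std::bad_variant_access (the std counterpart of absl::bad_variant_access) when the requested alternative is not the active one.

#include <iostream>
#include <variant>

int main() {
  std::variant<int, double> v = 3.14;
  try {
    (void)std::get<int>(v);  // wrong alternative: throws
  } catch (const std::bad_variant_access &ex) {
    std::cerr << "attribute cast error: " << ex.what() << '\n';
  }
}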
diff --git a/paddle/cinn/optim/unroll_loops.cc b/paddle/cinn/optim/unroll_loops.cc
index 276a6339249918..be8a9a4474d604 100644
--- a/paddle/cinn/optim/unroll_loops.cc
+++ b/paddle/cinn/optim/unroll_loops.cc
@@ -36,7 +36,7 @@ struct UnrollMutator : public ir::IRMutator<Expr *> {
   void Visit(const ir::ScheduleBlock* op, Expr* expr) override {
     auto attr_it = op->attrs.find(ir::attr::auto_unroll_max_step);
     if (attr_it != op->attrs.end()) {
-      const int* attr_v = absl::get_if<int>(&attr_it->second);
+      const int* attr_v = std::get_if<int>(&attr_it->second);
       if (attr_v) {
         int value = *attr_v;
         std::swap(auto_max_step_, value);
diff --git a/paddle/cinn/pybind/backends.cc b/paddle/cinn/pybind/backends.cc
index 0317bc353dacf5..32333b6a02d2ac 100644
--- a/paddle/cinn/pybind/backends.cc
+++ b/paddle/cinn/pybind/backends.cc
@@ -40,7 +40,7 @@ void BindExecutionEngine(py::module *m) {
       .def_readwrite("opt_level", &ExecutionOptions::opt_level)
       .def_readwrite("enable_debug_info", &ExecutionOptions::enable_debug_info);

-  auto lookup = [](ExecutionEngine &self, absl::string_view name) {
+  auto lookup = [](ExecutionEngine &self, std::string_view name) {
     auto *function_ptr =
         reinterpret_cast<lang::PackedFunc::body_t *>(self.Lookup(name));
     auto function_wrapper =
@@ -64,7 +64,7 @@ void BindExecutionEngine(py::module *m) {
       .def("link", &ExecutionEngine::Link, py::arg("module"));

   {
-    auto lookup = [](Compiler &self, absl::string_view name) {
+    auto lookup = [](Compiler &self, std::string_view name) {
       auto *function_ptr =
           reinterpret_cast<lang::PackedFunc::body_t *>(self.Lookup(name));
       auto function_wrapper =
diff --git a/paddle/cinn/pybind/bind.h b/paddle/cinn/pybind/bind.h
index e0e90cf4a404c8..706d435d27c70c 100644
--- a/paddle/cinn/pybind/bind.h
+++ b/paddle/cinn/pybind/bind.h
@@ -14,11 +14,11 @@
 #pragma once
-#include <absl/strings/string_view.h>
-#include <absl/types/variant.h>
 #include <pybind11/numpy.h>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
+#include <string_view>
+#include <variant>

 #include "paddle/utils/flat_hash_map.h"

 namespace pybind11 {
@@ -34,8 +34,7 @@ struct type_caster<std::variant<Value...>>
     : variant_caster<std::variant<Value...>> {};

 template <>
-struct type_caster<absl::string_view> : string_caster<absl::string_view> {
-};
+struct type_caster<std::string_view> : string_caster<std::string_view> {};

 }  // namespace detail
 }  // namespace pybind11
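A sketch of the std::get_if pattern used by unroll_loops.cc above (AutoUnrollStep is an illustrative name): unlike std::get, get_if takes a pointer to the variant and returns nullptr rather than throwing when the alternative does not match.

#include <variant>

int AutoUnrollStep(const std::variant<int, bool> &attr, int fallback) {
  if (const int *step = std::get_if<int>(&attr)) {
    return *step;  // alternative matched
  }
  return fallback;  // mismatch: nullptr, no exception
}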
diff --git a/paddle/cinn/pybind/bind_utils.h b/paddle/cinn/pybind/bind_utils.h
index 7d7764917a873c..80ed020bcd7f41 100644
--- a/paddle/cinn/pybind/bind_utils.h
+++ b/paddle/cinn/pybind/bind_utils.h
@@ -36,35 +36,35 @@ using cinn::common::Type;
 using ir::Expr;
 using ir::ExprNode;

-using ExprOp = absl::variant;
-using BinaryOp = absl::variant<>;
-using UnaryOp = absl::variant<>;
+using ExprOp = std::variant;
+using BinaryOp = std::variant<>;
+using UnaryOp = std::variant<>;

 // hold CINNValue
 using ValueVar =
-    absl::variant;
+    std::variant;

 inline ValueVar ConvertToVar(const CINNValue &value) {
   auto type_code = value.type_code();
@@ -87,7 +87,7 @@ inline ValueVar ConvertToVar(const CINNValue &value) {
 }

 template <typename T>
-auto DefineShared(py::module *m, absl::string_view obj_name) {
+auto DefineShared(py::module *m, std::string_view obj_name) {
   std::string name = "Shared" + std::string(obj_name);

   py::class_<Shared<T>> shared(*m, name.c_str());
@@ -98,7 +98,7 @@ auto DefineShared(py::module *m, absl::string_view obj_name) {
 }

 template <typename NodeType>
-void DefineExprNode(py::module *m, absl::string_view node_name) {
+void DefineExprNode(py::module *m, std::string_view node_name) {
   using ExprNodeT = ExprNode<NodeType>;

   std::string prefix{"ExprNode"};
@@ -122,7 +122,7 @@ void DefineExprNode(py::module *m, absl::string_view node_name) {
 }

 template <typename NodeType>
-void DefineBinaryOpNode(py::module *m, absl::string_view node_name) {
+void DefineBinaryOpNode(py::module *m, std::string_view node_name) {
   DefineExprNode<NodeType>(m, node_name);

   std::string prefix{"BinaryOpNode"};
   std::string name = prefix + std::string(node_name);
@@ -151,7 +151,7 @@ void DefineBinaryOpNode(py::module *m, absl::string_view node_name) {
 }

 template <typename NodeType>
-void DefineUnaryOpNode(py::module *m, absl::string_view node_name) {
+void DefineUnaryOpNode(py::module *m, std::string_view node_name) {
   using UnaryOpNodeT = ir::UnaryOpNode<NodeType>;

   DefineExprNode<NodeType>(m, node_name);
diff --git a/paddle/cinn/pybind/common.cc b/paddle/cinn/pybind/common.cc
index 072898a2c9f3dd..b5fb397daacff4 100644
--- a/paddle/cinn/pybind/common.cc
+++ b/paddle/cinn/pybind/common.cc
@@ -222,7 +222,7 @@ void BindType(py::module *m) {
       py::arg("type"),
       py::arg("val"));

-  m->def("type_of", [](absl::string_view dtype) {
+  m->def("type_of", [](std::string_view dtype) {
     return cinn::common::Str2Type(dtype.data());
   });
 }
@@ -359,7 +359,7 @@ void BindCinnValue(py::module *m) {
     auto visitor = [&](auto x, auto y) {                            \
       return binary_op_visitor(self, x, y, __op##_fn);             \
     };                                                             \
-    absl::visit(visitor, ConvertToVar(self), ConvertToVar(other)); \
+    std::visit(visitor, ConvertToVar(self), ConvertToVar(other));  \
     return self;                                                   \
   })
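One caveat worth noting for the type_of binding above, shown as a sketch: std::string_view (like absl::string_view) does not guarantee null termination, so .data() is only safe for C-style APIs when the view is known to end in '\0'; copying into a std::string is the conservative route. PrintDtype is an illustrative name, not a CINN function.

#include <cstdio>
#include <string>
#include <string_view>

void PrintDtype(std::string_view dtype) {
  std::string owned(dtype);  // guarantees a null-terminated buffer
  std::printf("dtype = %s\n", owned.c_str());
}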
diff --git a/paddle/cinn/pybind/lang.cc b/paddle/cinn/pybind/lang.cc
index 76bc54f0ba34ac..6f260b0b443b80 100644
--- a/paddle/cinn/pybind/lang.cc
+++ b/paddle/cinn/pybind/lang.cc
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include <absl/types/variant.h>
 #include <pybind11/functional.h>
+#include <variant>

 #include <string>
@@ -102,7 +102,7 @@ void BindCompute(py::module *m) {
          py::overload_cast<const std::string &,
                            const std::vector<ir::Expr> &,
-                           const std::map<std::string,
-                                          absl::variant<int, float, bool, std::string>> &>(
+                           const std::map<std::string,
+                                          std::variant<int, float, bool, std::string>> &>(
              &lang::CallExtern));
 }
@@ -169,13 +169,13 @@ class PlaceholderWrapper {
   DEFINE_PLACEHOLDER(float32, float);   \
   DEFINE_PLACEHOLDER(float64, double)

-  PlaceholderWrapper(absl::string_view dtype,
+  PlaceholderWrapper(std::string_view dtype,
                      const std::string &name,
                      const std::vector<int> &shape) {
     INIT_PLACEHOLDER;
   }

-  PlaceholderWrapper(absl::string_view dtype,
+  PlaceholderWrapper(std::string_view dtype,
                      const std::string &name,
                      const std::vector<ir::Expr> &shape) {
     INIT_PLACEHOLDER;
   }
@@ -184,39 +184,39 @@ class PlaceholderWrapper {
 #undef DEFINE_PLACEHOLDER

   ir::Type type() const {
-    return absl::visit([](auto &v) { return v->type(); }, placeholder_);
+    return std::visit([](auto &v) { return v->type(); }, placeholder_);
   }
   ir::Tensor tensor() const {
-    return absl::visit([](auto &v) { return v->tensor(); }, placeholder_);
+    return std::visit([](auto &v) { return v->tensor(); }, placeholder_);
   }

   ir::Expr operator()(ir::Expr a) const {
-    return absl::visit([&](auto &v) { return (*v)(a); }, placeholder_);
+    return std::visit([&](auto &v) { return (*v)(a); }, placeholder_);
   }
   ir::Expr operator()(ir::Expr a, ir::Expr b) const {
-    return absl::visit([&](auto &v) { return (*v)(a, b); }, placeholder_);
+    return std::visit([&](auto &v) { return (*v)(a, b); }, placeholder_);
   }
   ir::Expr operator()(ir::Expr a, ir::Expr b, ir::Expr c) const {
-    return absl::visit([&](auto &v) { return (*v)(a, b, c); }, placeholder_);
+    return std::visit([&](auto &v) { return (*v)(a, b, c); }, placeholder_);
   }
   ir::Expr operator()(const std::vector<ir::Expr> &indices) const {
-    return absl::visit([&](auto &v) { return (*v)(indices); }, placeholder_);
+    return std::visit([&](auto &v) { return (*v)(indices); }, placeholder_);
   }

   operator ir::Tensor() {
-    return absl::visit([&](auto &v) { return ir::Tensor(*v); }, placeholder_);
+    return std::visit([&](auto &v) { return ir::Tensor(*v); }, placeholder_);
   }
   operator ir::Expr() {
-    return absl::visit([&](auto &v) { return ir::Expr(*v); }, placeholder_);
+    return std::visit([&](auto &v) { return ir::Expr(*v); }, placeholder_);
   }

  private:
   template <typename... Ts>
-  using PlaceholderVariant = absl::variant<std::unique_ptr<lang::Placeholder<Ts>>...>;
+  using PlaceholderVariant = std::variant<std::unique_ptr<lang::Placeholder<Ts>>...>;

   PlaceholderVariant<int32_t, int64_t, float, double> placeholder_;
 };
@@ -224,10 +224,10 @@ class PlaceholderWrapper {
 void BindPlaceholder(py::module *m) {
   py::class_<PlaceholderWrapper> placeholder(*m, "Placeholder");
   placeholder
-      .def(py::init<absl::string_view,
+      .def(py::init<std::string_view,
                     const std::string &,
                     const std::vector<int> &>())
-      .def(py::init<absl::string_view,
+      .def(py::init<std::string_view,
                     const std::string &,
                     const std::vector<ir::Expr> &>())
       .def("type", &PlaceholderWrapper::type)
diff --git a/paddle/cinn/utils/functional.h b/paddle/cinn/utils/functional.h
index 3ae8c4f03935ea..6f0adcf7402a38 100644
--- a/paddle/cinn/utils/functional.h
+++ b/paddle/cinn/utils/functional.h
@@ -14,7 +14,7 @@
 #pragma once
-#include <absl/types/optional.h>
+#include <optional>
 #include <type_traits>
 #include <vector>
@@ -79,14 +79,14 @@ struct IsString
    : std::integral_constant<
          bool,
          std::is_same<std::decay_t<T>, std::string>::value> {};

 template <typename T>
-auto Flatten(const absl::optional<std::reference_wrapper<const T>> &c)
+auto Flatten(const std::optional<std::reference_wrapper<const T>> &c)
     -> std::enable_if_t<std::is_arithmetic<T>::value || IsString<T>::value,
                         std::vector<T>> {
   return c ? std::vector<T>{c->get()} : std::vector<T>{};
 }

 template <typename T>
-auto Flatten(
-    const absl::optional<std::reference_wrapper<const std::vector<T>>> &c)
+auto Flatten(
+    const std::optional<std::reference_wrapper<const std::vector<T>>> &c)
     -> std::enable_if_t<std::is_arithmetic<T>::value &&
                             !IsString<decltype(c->get())>::value,
                         std::vector<T>> {
@@ -98,8 +98,8 @@ template <typename T,
           typename = std::enable_if_t<!std::is_arithmetic<T>::value>,
           typename C = std::decay_t<decltype(*std::declval<const T>().begin())>>
-auto Flatten(const absl::optional<std::reference_wrapper<const T>> &c) {
-  absl::optional<std::reference_wrapper<const C>> val;
+auto Flatten(const std::optional<std::reference_wrapper<const T>> &c) {
+  std::optional<std::reference_wrapper<const C>> val;
   if (c && !c->get().empty()) {
     val = *(c->get().begin());
   }
@@ -120,7 +120,7 @@ auto Flatten(const absl::optional<std::reference_wrapper<const T>> &c) {

 template <typename T>
 auto Flatten(const T &v) {
-  absl::optional<std::reference_wrapper<const T>> w = v;
+  std::optional<std::reference_wrapper<const T>> w = v;
   return Flatten(w);
 }
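A reduced sketch (FlattenDemo is an illustrative name) of the std::optional + std::reference_wrapper shape the Flatten overloads above are built on: an empty optional models "no container", and c->get() yields the wrapped const reference.

#include <functional>
#include <optional>
#include <vector>

std::vector<int> FlattenDemo(
    const std::optional<std::reference_wrapper<const std::vector<int>>> &c) {
  return c ? std::vector<int>(c->get().begin(), c->get().end())
           : std::vector<int>{};
}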
"True" : "False"); + } else if (std::holds_alternative(attr)) { ss << std::setprecision(std::numeric_limits::max_digits10) - << std::showpoint << absl::get(attr); - } else if (absl::holds_alternative(attr)) { + << std::showpoint << std::get(attr); + } else if (std::holds_alternative(attr)) { ss << std::setprecision(std::numeric_limits::max_digits10) - << std::showpoint << absl::get(attr); - } else if (absl::holds_alternative(attr)) { - ss << absl::get(attr); - } else if (absl::holds_alternative(attr)) { - ss << absl::get(attr); - } else if (absl::holds_alternative(attr)) { - ss << "\"" << absl::get(attr) << "\""; - } else if (absl::holds_alternative>(attr)) { - ss << "[" + cinn::utils::Join(absl::get>(attr), ", ") + + << std::showpoint << std::get(attr); + } else if (std::holds_alternative(attr)) { + ss << std::get(attr); + } else if (std::holds_alternative(attr)) { + ss << std::get(attr); + } else if (std::holds_alternative(attr)) { + ss << "\"" << std::get(attr) << "\""; + } else if (std::holds_alternative>(attr)) { + ss << "[" + cinn::utils::Join(std::get>(attr), ", ") + "]"; - } else if (absl::holds_alternative>(attr)) { - ss << "[" + cinn::utils::Join(absl::get>(attr), ", ") + + } else if (std::holds_alternative>(attr)) { + ss << "[" + cinn::utils::Join(std::get>(attr), ", ") + "]"; + } else if (std::holds_alternative>(attr)) { + ss << "[" + cinn::utils::Join(std::get>(attr), ", ") + "]"; - } else if (absl::holds_alternative>(attr)) { - ss << "[" + cinn::utils::Join(absl::get>(attr), ", ") + + } else if (std::holds_alternative>(attr)) { + ss << "[" + cinn::utils::Join(std::get>(attr), ", ") + "]"; - } else if (absl::holds_alternative>(attr)) { - ss << "[" + cinn::utils::Join(absl::get>(attr), ", ") + + } else if (std::holds_alternative>(attr)) { + ss << "[" + cinn::utils::Join(std::get>(attr), ", ") + "]"; - } else if (absl::holds_alternative>(attr)) { - ss << "[" + cinn::utils::Join(absl::get>(attr), ", ") + - "]"; - } else if (absl::holds_alternative>(attr)) { - auto attrs = absl::get>(attr); + } else if (std::holds_alternative>(attr)) { + auto attrs = std::get>(attr); for (auto &str : attrs) { str = "\"" + str + "\""; } diff --git a/paddle/cinn/utils/type_defs.h b/paddle/cinn/utils/type_defs.h index fffd15d5c7abf3..2fd97ca4774ea7 100644 --- a/paddle/cinn/utils/type_defs.h +++ b/paddle/cinn/utils/type_defs.h @@ -13,8 +13,8 @@ // limitations under the License. #pragma once -#include #include +#include #include #include "paddle/cinn/hlir/dialect/operator/ir/symbol_bindings.h" #include "paddle/pir/include/dialect/shape/utils/dim_expr.h" @@ -24,21 +24,21 @@ namespace cinn { namespace utils { // attribute type defs -using Attribute = absl::variant, - std::vector, - std::vector, - std::vector, - int64_t, - double, - std::vector, - std::vector, - // the followings are only for generate shape op - std::vector, - cinn::dialect::SymbolBindings>; +using Attribute = std::variant, + std::vector, + std::vector, + std::vector, + int64_t, + double, + std::vector, + std::vector, + // the followings are only for generate shape op + std::vector, + cinn::dialect::SymbolBindings>; using AttributeMap = paddle::flat_hash_map; // shape type defs