From a0c20af97a9f7355c6163831fa60f6c3f8c30861 Mon Sep 17 00:00:00 2001 From: David Olsen Date: Tue, 16 Jan 2024 11:42:45 -0800 Subject: [PATCH] [CIR] Vector types, part 2 (#387) This is part 2 of implementing vector types and vector operations in ClangIR, issue #284. Create new operation `cir.vec.insert`, which changes one element of an existing vector object and returns the modified vector object. The input and output vectors are prvalues; this operation does not touch memory. The assembly format and the order of the arguments match that of llvm.insertelement in the LLVM dialect, since the operations have identical semantics. Implement vector element lvalues in class `LValue`, adding member functions `getVectorAddress()`, `getVectorPointer()`, `getVectorIdx()`, and `MakeVectorElt(...)`. The assembly format for operation `cir.vec.extract` was changed to match that of llvm.extractelement in the LLVM dialect, since the operations have identical semantics. These two features, `cir.vec.insert` and vector element lvalues, are used to implement `v[n] = e`, where `v` is a vector. This is a little tricky, because `v[n]` isn't really an lvalue, as its address cannot be taken. The only place it can be used as an lvalue is on the left-hand side of an assignment. Implement unary operators on vector objects (except for logical not on a vector mask, which will be covered in a future commit for boolean vectors). The code for lowering cir.unary for all types, in `CIRUnaryOpLowering::matchAndRewrite`, was largely rewritten. Support for unary `+` on non-vector pointer types was added. (It was already supported and tested in AST->ClangIR CodeGen, but was missing from ClangIR->LLVM Dialect lowering.) Add tests for all binary vector arithmetic operations other than relational operators and shift operators. They were all working after the previous vector types commit, but only addition had been tested at the time. 
Co-authored-by: Bruno Cardoso Lopes --- clang/include/clang/CIR/Dialect/IR/CIROps.td | 33 ++- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 37 +++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenValue.h | 26 +++ .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 134 ++++++++---- clang/test/CIR/CodeGen/vectype.cpp | 121 +++++++---- clang/test/CIR/IR/invalid.cir | 55 ++++- clang/test/CIR/Lowering/unary-plus-minus.cir | 3 +- clang/test/CIR/Lowering/vectype.cpp | 201 ++++++++++++++++++ 9 files changed, 519 insertions(+), 93 deletions(-) create mode 100644 clang/test/CIR/Lowering/vectype.cpp diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td b/clang/include/clang/CIR/Dialect/IR/CIROps.td index 6fab7c65d010..49c2bce154a8 100644 --- a/clang/include/clang/CIR/Dialect/IR/CIROps.td +++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td @@ -1672,14 +1672,39 @@ def GetMemberOp : CIR_Op<"get_member"> { let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// VecInsertOp +//===----------------------------------------------------------------------===// + +def VecInsertOp : CIR_Op<"vec.insert", [Pure, + TypesMatchWith<"argument type matches vector element type", "vec", "value", + "$_self.cast().getEltType()">, + AllTypesMatch<["result", "vec"]>]> { + + let summary = "Insert one element into a vector object"; + let description = [{ + The `cir.vec.insert` operation replaces the element of the given vector at + the given index with the given value. The new vector with the inserted + element is returned. 
+ }]; + + let arguments = (ins CIR_VectorType:$vec, AnyType:$value, CIR_IntType:$index); + let results = (outs CIR_VectorType:$result); + + let assemblyFormat = [{ + $value `,` $vec `[` $index `:` type($index) `]` attr-dict `:` type($vec) + }]; + + let hasVerifier = 0; +} + //===----------------------------------------------------------------------===// // VecExtractOp //===----------------------------------------------------------------------===// def VecExtractOp : CIR_Op<"vec.extract", [Pure, - TypesMatchWith<"type of 'result' matches element type of 'vec'", - "vec", "result", - "$_self.cast().getEltType()">]> { + TypesMatchWith<"type of 'result' matches element type of 'vec'", "vec", + "result", "$_self.cast().getEltType()">]> { let summary = "Extract one element from a vector object"; let description = [{ @@ -1691,7 +1716,7 @@ def VecExtractOp : CIR_Op<"vec.extract", [Pure, let results = (outs CIR_AnyType:$result); let assemblyFormat = [{ - $vec `[` $index `:` type($index) `]` type($vec) `->` type($result) attr-dict + $vec `[` $index `:` type($index) `]` attr-dict `:` type($vec) }]; let hasVerifier = 0; diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 47b11b9fa39d..c8c51c553360 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -545,16 +545,18 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value Value, Address Addr, bool Volatile, QualType Ty, LValueBaseInfo BaseInfo, bool isInit, bool isNontemporal) { - if (!CGM.getCodeGenOpts().PreserveVec3Type && Ty->isVectorType() && - Ty->castAs()->getNumElements() == 3) - llvm_unreachable("NYI: Special treatment of 3-element vectors"); - Value = buildToMemory(Value, Ty); if (Ty->isAtomicType()) { llvm_unreachable("NYI"); } + if (const auto *ClangVecTy = Ty->getAs()) { + if (!CGM.getCodeGenOpts().PreserveVec3Type && + ClangVecTy->getNumElements() == 3) + llvm_unreachable("NYI: Special treatment of 3-element vector store"); + } + 
// Update the alloca with more info on initialization. assert(Addr.getPointer() && "expected pointer to exist"); auto SrcAlloca = @@ -622,6 +624,18 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { + if (!Dst.isSimple()) { + if (Dst.isVectorElt()) { + // Read/modify/write the vector, inserting the new element + mlir::Location loc = Dst.getVectorPointer().getLoc(); + mlir::Value Vector = builder.createLoad(loc, Dst.getVectorAddress()); + Vector = builder.create( + loc, Vector, Src.getScalarVal(), Dst.getVectorIdx()); + builder.createStore(loc, Vector, Dst.getVectorAddress()); + return; + } + llvm_unreachable("NYI: non-simple store through lvalue"); + } assert(Dst.isSimple() && "only implemented simple"); // There's special magic for assigning into an ARC-qualified l-value. @@ -1387,7 +1401,10 @@ LValue CIRGenFunction::buildArraySubscriptExpr(const ArraySubscriptExpr *E, // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa(E->getBase())) { - llvm_unreachable("vector subscript is NYI"); + LValue LHS = buildLValue(E->getBase()); + auto Index = EmitIdxAfterBase(/*Promote=*/false); + return LValue::MakeVectorElt(LHS.getAddress(), Index, + E->getBase()->getType(), LHS.getBaseInfo()); } // All the other cases basically behave like simple offsetting. 
@@ -2371,16 +2388,18 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, QualType Ty, mlir::Location Loc, LValueBaseInfo BaseInfo, bool isNontemporal) { - if (!CGM.getCodeGenOpts().PreserveVec3Type && Ty->isVectorType() && - Ty->castAs()->getNumElements() == 3) - llvm_unreachable("NYI: Special treatment of 3-element vectors"); - // Atomic operations have to be done on integral types LValue AtomicLValue = LValue::makeAddr(Addr, Ty, getContext(), BaseInfo); if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) { llvm_unreachable("NYI"); } + if (const auto *ClangVecTy = Ty->getAs()) { + if (!CGM.getCodeGenOpts().PreserveVec3Type && + ClangVecTy->getNumElements() == 3) + llvm_unreachable("NYI: Special treatment of 3-element vector load"); + } + mlir::cir::LoadOp Load = builder.create( Loc, Addr.getElementType(), Addr.getPointer()); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 880e47f6efb4..68e2ca82c2ff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1646,7 +1646,7 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { if (dstTy.isa()) return boolVal; - llvm_unreachable("destination type for negation unary operator is NYI"); + llvm_unreachable("destination type for logical-not unary operator is NYI"); } // Conversion from bool, integral, or floating-point to integral or diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index c6edeb4d4fe4..86b6f5443856 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -207,6 +207,7 @@ class LValue { unsigned Alignment; mlir::Value V; mlir::Type ElementType; + mlir::Value VectorIdx; // Index for vector subscript LValueBaseInfo BaseInfo; const CIRGenBitFieldInfo *BitFieldInfo{0}; @@ -301,6 +302,31 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } 
clang::Qualifiers &getQuals() { return Quals; } + // vector element lvalue + Address getVectorAddress() const { + return Address(getVectorPointer(), ElementType, getAlignment()); + } + mlir::Value getVectorPointer() const { + assert(isVectorElt()); + return V; + } + mlir::Value getVectorIdx() const { + assert(isVectorElt()); + return VectorIdx; + } + + static LValue MakeVectorElt(Address vecAddress, mlir::Value Index, + clang::QualType type, LValueBaseInfo BaseInfo) { + LValue R; + R.LVType = VectorElt; + R.V = vecAddress.getPointer(); + R.ElementType = vecAddress.getElementType(); + R.VectorIdx = Index; + R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(), + BaseInfo); + return R; + } + // bitfield lvalue Address getBitFieldAddress() const { return Address(getBitFieldPointer(), ElementType, getAlignment()); diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 662a12ebc7a6..2f4ac580f8c3 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -1105,12 +1105,12 @@ class CIRVectorCreateLowering // Start with an 'undef' value for the vector. Then 'insertelement' for // each of the vector elements. 
auto vecTy = op.getType().dyn_cast(); - assert(vecTy && "result type of cir.vec op is not VectorType"); + assert(vecTy && "result type of cir.vec.create op is not VectorType"); auto llvmTy = typeConverter->convertType(vecTy); auto loc = op.getLoc(); mlir::Value result = rewriter.create(loc, llvmTy); assert(vecTy.getSize() == op.getElements().size() && - "cir.vec operands count doesn't match vector type elements count"); + "cir.vec.create op count doesn't match vector type elements count"); for (uint64_t i = 0; i < vecTy.getSize(); ++i) { mlir::Value indexValue = rewriter.create( loc, rewriter.getI64Type(), i); @@ -1122,6 +1122,20 @@ class CIRVectorCreateLowering } }; +class CIRVectorInsertLowering + : public mlir::OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(mlir::cir::VecInsertOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.getVec(), adaptor.getValue(), adaptor.getIndex()); + return mlir::success(); + } +}; + class CIRVectorExtractLowering : public mlir::OpConversionPattern { public: @@ -1536,24 +1550,33 @@ class CIRUnaryOpLowering mlir::LogicalResult matchAndRewrite(mlir::cir::UnaryOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - mlir::Type type = op.getInput().getType(); - - auto llvmInType = adaptor.getInput().getType(); - auto llvmType = getTypeConverter()->convertType(op.getType()); + assert(op.getType() == op.getInput().getType() && + "Unary operation's operand type and result type are different"); + mlir::Type type = op.getType(); + mlir::Type elementType = type; + bool IsVector = false; + if (auto VecType = type.dyn_cast()) { + IsVector = true; + elementType = VecType.getEltType(); + } + auto llvmType = getTypeConverter()->convertType(type); + auto loc = op.getLoc(); - // Integer unary operations. 
- if (type.isa()) { + // Integer unary operations: + - ~ ++ -- + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { + assert(!IsVector && "++ not allowed on vector types"); auto One = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); rewriter.replaceOpWithNewOp(op, llvmType, adaptor.getInput(), One); return mlir::success(); } case mlir::cir::UnaryOpKind::Dec: { + assert(!IsVector && "-- not allowed on vector types"); auto One = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 1)); + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1)); rewriter.replaceOpWithNewOp(op, llvmType, adaptor.getInput(), One); return mlir::success(); @@ -1563,15 +1586,39 @@ class CIRUnaryOpLowering return mlir::success(); } case mlir::cir::UnaryOpKind::Minus: { - auto Zero = rewriter.create( - op.getLoc(), llvmInType, mlir::IntegerAttr::get(llvmInType, 0)); + mlir::Value Zero; + if (IsVector) + Zero = rewriter.create(loc, llvmType); + else + Zero = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, 0)); rewriter.replaceOpWithNewOp(op, llvmType, Zero, adaptor.getInput()); return mlir::success(); } case mlir::cir::UnaryOpKind::Not: { - auto MinusOne = rewriter.create( - op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, -1)); + // bit-wise complement operator, implemented as an XOR with -1. + mlir::Value MinusOne; + if (IsVector) { + // Creating a vector object with all -1 values is easier said than + // done. It requires a series of insertelement ops. 
+ mlir::Type llvmElementType = + getTypeConverter()->convertType(elementType); + auto MinusOneInt = rewriter.create( + loc, llvmElementType, + mlir::IntegerAttr::get(llvmElementType, -1)); + MinusOne = rewriter.create(loc, llvmType); + auto NumElements = type.dyn_cast().getSize(); + for (uint64_t i = 0; i < NumElements; ++i) { + mlir::Value indexValue = rewriter.create( + loc, rewriter.getI64Type(), i); + MinusOne = rewriter.create( + loc, MinusOne, MinusOneInt, indexValue); + } + } else { + MinusOne = rewriter.create( + loc, llvmType, mlir::IntegerAttr::get(llvmType, -1)); + } rewriter.replaceOpWithNewOp(op, llvmType, MinusOne, adaptor.getInput()); return mlir::success(); @@ -1579,21 +1626,23 @@ class CIRUnaryOpLowering } } - // Floating point unary operations. - if (type.isa()) { + // Floating point unary operations: + - ++ -- + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Inc: { - auto oneAttr = rewriter.getFloatAttr(llvmInType, 1.0); - auto oneConst = rewriter.create( - op.getLoc(), llvmInType, oneAttr); + assert(!IsVector && "++ not allowed on vector types"); + auto oneAttr = rewriter.getFloatAttr(llvmType, 1.0); + auto oneConst = + rewriter.create(loc, llvmType, oneAttr); rewriter.replaceOpWithNewOp(op, llvmType, oneConst, adaptor.getInput()); return mlir::success(); } case mlir::cir::UnaryOpKind::Dec: { - auto negOneAttr = rewriter.getFloatAttr(llvmInType, -1.0); - auto negOneConst = rewriter.create( - op.getLoc(), llvmInType, negOneAttr); + assert(!IsVector && "-- not allowed on vector types"); + auto negOneAttr = rewriter.getFloatAttr(llvmType, -1.0); + auto negOneConst = + rewriter.create(loc, llvmType, negOneAttr); rewriter.replaceOpWithNewOp( op, llvmType, negOneConst, adaptor.getInput()); return mlir::success(); @@ -1602,35 +1651,48 @@ class CIRUnaryOpLowering rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); case mlir::cir::UnaryOpKind::Minus: { - auto negOneAttr = mlir::FloatAttr::get(llvmInType, 
-1.0); - auto negOneConst = rewriter.create( - op.getLoc(), llvmInType, negOneAttr); - rewriter.replaceOpWithNewOp( - op, llvmType, negOneConst, adaptor.getInput()); + rewriter.replaceOpWithNewOp(op, llvmType, + adaptor.getInput()); return mlir::success(); } default: - op.emitError() << "Floating point unary lowering ot implemented"; - return mlir::failure(); + return op.emitError() + << "Unknown floating-point unary operation during CIR lowering"; } } - // Boolean unary operations. - if (type.isa()) { + // Boolean unary operations: ! only. (For all others, the operand has + // already been promoted to int.) + if (elementType.isa()) { switch (op.getKind()) { case mlir::cir::UnaryOpKind::Not: + assert(!IsVector && "NYI: op! on vector mask"); rewriter.replaceOpWithNewOp( op, llvmType, adaptor.getInput(), rewriter.create( - op.getLoc(), llvmType, mlir::IntegerAttr::get(llvmType, 1))); + loc, llvmType, mlir::IntegerAttr::get(llvmType, 1))); + return mlir::success(); + default: + return op.emitError() + << "Unknown boolean unary operation during CIR lowering"; + } + } + + // Pointer unary operations: + only. (++ and -- of pointers are implemented + // with cir.ptr_stride, not cir.unary.) 
+ if (elementType.isa()) { + switch (op.getKind()) { + case mlir::cir::UnaryOpKind::Plus: + rewriter.replaceOp(op, adaptor.getInput()); return mlir::success(); default: - op.emitError() << "Unary operator not implemented for bool type"; + op.emitError() << "Unknown pointer unary operation during CIR lowering"; return mlir::failure(); } } - return op.emitError() << "Unary operation has unsupported type: " << type; + return op.emitError() << "Unary operation has unsupported type: " + << elementType; } }; @@ -2061,8 +2123,8 @@ void populateCIRToLLVMConversionPatterns(mlir::RewritePatternSet &patterns, CIRTernaryOpLowering, CIRGetMemberOpLowering, CIRSwitchOpLowering, CIRPtrDiffOpLowering, CIRCopyOpLowering, CIRMemCpyOpLowering, CIRFAbsOpLowering, CIRVTableAddrPointOpLowering, CIRVectorCreateLowering, - CIRVectorExtractLowering, CIRStackSaveLowering, CIRStackRestoreLowering>( - converter, patterns.getContext()); + CIRVectorInsertLowering, CIRVectorExtractLowering, CIRStackSaveLowering, + CIRStackRestoreLowering>(converter, patterns.getContext()); } namespace { diff --git a/clang/test/CIR/CodeGen/vectype.cpp b/clang/test/CIR/CodeGen/vectype.cpp index 80f6bf39258c..9b4fd6254b7d 100644 --- a/clang/test/CIR/CodeGen/vectype.cpp +++ b/clang/test/CIR/CodeGen/vectype.cpp @@ -1,40 +1,89 @@ // RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o - | FileCheck %s -typedef int int4 __attribute__((vector_size(16))); -int test_vector_basic(int x, int y, int z) { - int4 a = { 1, 2, 3, 4 }; - int4 b = { x, y, z, x + y + z }; - int4 c = a + b; - return c[1]; +typedef int vi4 __attribute__((vector_size(16))); +typedef double vd2 __attribute__((vector_size(16))); + +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. 
+ vi4 a = { 1, 2, 3, 4 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + + // Non-const vector initialization. + vi4 b = { x, 5, 6, x + 1 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}} : !s32i, !s32i, !s32i, !s32i) : + + // Extract element + int c = a[x]; + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + + // Insert element + a[x] = x; + // CHECK: %[[#LOADEDVI:]] = cir.load %[[#STORAGEVI:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVI:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVI]][%{{[0-9]+}} : !s32i] : + // CHECK: cir.store %[[#UPDATEDVI]], %[[#STORAGEVI]] : !cir.vector, cir.ptr > + + // Binary arithmetic operations + vi4 d = a + b; + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 e = a - b; + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 f = a * b; + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 g = a / b; + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 h = a % b; + // CHECK: %{{[0-9]+}} = cir.binop(rem, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 i = a & b; + // CHECK: %{{[0-9]+}} = cir.binop(and, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 j = a | b; + // CHECK: %{{[0-9]+}} = cir.binop(or, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vi4 k = a ^ b; + // CHECK: %{{[0-9]+}} = cir.binop(xor, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + + // Unary arithmetic operations + vi4 l = +a; + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vi4 m = -a; + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vi4 n = ~a; + // CHECK: %{{[0-9]+}} = cir.unary(not, %{{[0-9]+}}) : !cir.vector, !cir.vector } -// CHECK: %4 = cir.alloca !cir.vector, cir.ptr >, ["a", init] {alignment = 16 : i64} -// CHECK: %5 = 
cir.alloca !cir.vector, cir.ptr >, ["b", init] {alignment = 16 : i64} -// CHECK: %6 = cir.alloca !cir.vector, cir.ptr >, ["c", init] {alignment = 16 : i64} - -// CHECK: %7 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: %8 = cir.const(#cir.int<2> : !s32i) : !s32i -// CHECK: %9 = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: %10 = cir.const(#cir.int<4> : !s32i) : !s32i -// CHECK: %11 = cir.vec.create(%7, %8, %9, %10 : !s32i, !s32i, !s32i, !s32i) : -// CHECK: cir.store %11, %4 : !cir.vector, cir.ptr > -// CHECK: %12 = cir.load %0 : cir.ptr , !s32i -// CHECK: %13 = cir.load %1 : cir.ptr , !s32i -// CHECK: %14 = cir.load %2 : cir.ptr , !s32i -// CHECK: %15 = cir.load %0 : cir.ptr , !s32i -// CHECK: %16 = cir.load %1 : cir.ptr , !s32i -// CHECK: %17 = cir.binop(add, %15, %16) : !s32i -// CHECK: %18 = cir.load %2 : cir.ptr , !s32i -// CHECK: %19 = cir.binop(add, %17, %18) : !s32i -// CHECK: %20 = cir.vec.create(%12, %13, %14, %19 : !s32i, !s32i, !s32i, !s32i) : -// CHECK: cir.store %20, %5 : !cir.vector, cir.ptr > -// CHECK: %21 = cir.load %4 : cir.ptr >, !cir.vector -// CHECK: %22 = cir.load %5 : cir.ptr >, !cir.vector -// CHECK: %23 = cir.binop(add, %21, %22) : !cir.vector -// CHECK: cir.store %23, %6 : !cir.vector, cir.ptr > -// CHECK: %24 = cir.load %6 : cir.ptr >, !cir.vector -// CHECK: %25 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: %26 = cir.vec.extract %24[%25 : !s32i] -> !s32i -// CHECK: cir.store %26, %3 : !s32i, cir.ptr -// CHECK: %27 = cir.load %3 : cir.ptr , !s32i -// CHECK: cir.return %27 : !s32i +void vector_double_test(int x, double y) { + // Vector constant. Not yet implemented. Expected results will change from + // cir.vec.create to cir.const. + vd2 a = { 1.5, 2.5 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : f64, f64) : + + // Non-const vector initialization. 
+ vd2 b = { y, y + 1.0 }; + // CHECK: %{{[0-9]+}} = cir.vec.create(%{{[0-9]+}}, %{{[0-9]+}} : f64, f64) : + + // Extract element + double c = a[x]; + // CHECK: %{{[0-9]+}} = cir.vec.extract %{{[0-9]+}}[%{{[0-9]+}} : !s32i] : + + // Insert element + a[x] = y; + // CHECK: %[[#LOADEDVF:]] = cir.load %[[#STORAGEVF:]] : cir.ptr >, !cir.vector + // CHECK: %[[#UPDATEDVF:]] = cir.vec.insert %{{[0-9]+}}, %[[#LOADEDVF]][%{{[0-9]+}} : !s32i] : + // CHECK: cir.store %[[#UPDATEDVF]], %[[#STORAGEVF]] : !cir.vector, cir.ptr > + + // Binary arithmetic operations + vd2 d = a + b; + // CHECK: %{{[0-9]+}} = cir.binop(add, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 e = a - b; + // CHECK: %{{[0-9]+}} = cir.binop(sub, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 f = a * b; + // CHECK: %{{[0-9]+}} = cir.binop(mul, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + vd2 g = a / b; + // CHECK: %{{[0-9]+}} = cir.binop(div, %{{[0-9]+}}, %{{[0-9]+}}) : !cir.vector + + // Unary arithmetic operations + vd2 l = +a; + // CHECK: %{{[0-9]+}} = cir.unary(plus, %{{[0-9]+}}) : !cir.vector, !cir.vector + vd2 m = -a; + // CHECK: %{{[0-9]+}} = cir.unary(minus, %{{[0-9]+}}) : !cir.vector, !cir.vector +} diff --git a/clang/test/CIR/IR/invalid.cir b/clang/test/CIR/IR/invalid.cir index 386417ae2ffd..e73b0ef0cbbb 100644 --- a/clang/test/CIR/IR/invalid.cir +++ b/clang/test/CIR/IR/invalid.cir @@ -418,6 +418,7 @@ module { cir.func @vec_op_size() { %0 = cir.const(#cir.int<1> : !s32i) : !s32i %1 = cir.vec.create(%0 : !s32i) : // expected-error {{'cir.vec.create' op operand count of 1 doesn't match vector type '!cir.vector x 2>' element count of 2}} + cir.return } // ----- @@ -428,17 +429,61 @@ cir.func @vec_op_type() { %0 = cir.const(#cir.int<1> : !s32i) : !s32i %1 = cir.const(#cir.int<2> : !u32i) : !u32i %2 = cir.vec.create(%0, %1 : !s32i, !u32i) : // expected-error {{'cir.vec.create' op operand type '!cir.int' doesn't match vector element type '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func 
@vec_extract_non_int_idx() { + %0 = cir.const(1.5e+00 : f64) : f64 + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : + %3 = cir.vec.extract %2[%0 : f64] : // expected-error {{expected '<'}} + cir.return } // ----- !s32i = !cir.int !u32i = !cir.int -cir.func @vec_extract_type() { - %0 = cir.const(#cir.int<1> : !s32i) : !s32i - %1 = cir.const(#cir.int<2> : !s32i) : !s32i - %2 = cir.vec.create(%0, %1 : !s32i, !s32i) : - %3 = cir.vec.extract %2[%0 : !s32i] -> !u32i // expected-error {{'cir.vec.extract' op failed to verify that type of 'result' matches element type of 'vec'}} +cir.func @vec_extract_bad_type() { + %0 = cir.alloca !u32i, cir.ptr , ["x", init] {alignment = 4 : i64} + %1 = cir.const(#cir.int<0> : !s32i) : !s32i + %2 = cir.vec.create(%1, %1 : !s32i, !s32i) : + %3 = cir.vec.extract %2[%1 : !s32i] : // expected-note {{prior use here}} + cir.store %3, %0 : !u32i, cir.ptr // expected-error {{use of value '%3' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_extract_non_vector() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.extract %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.extract' invalid kind of Type specified}} + cir.return +} + +// ----- + +!s32i = !cir.int +!u32i = !cir.int +cir.func @vec_insert_bad_type() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.create(%0, %0 : !s32i, !s32i) : + %2 = cir.const(#cir.int<0> : !u32i) : !u32i // expected-note {{prior use here}} + %3 = cir.vec.insert %2, %1[%0 : !s32i] : // expected-error {{use of value '%2' expects different type than prior uses: '!cir.int' vs '!cir.int'}} + cir.return +} + +// ----- + +!s32i = !cir.int +cir.func @vec_insert_non_vector() { + %0 = cir.const(#cir.int<0> : !s32i) : !s32i + %1 = cir.vec.insert %0, %0[%0 : !s32i] : !s32i // expected-error {{custom op 'cir.vec.insert' invalid kind of Type specified}} + cir.return } // 
----- diff --git a/clang/test/CIR/Lowering/unary-plus-minus.cir b/clang/test/CIR/Lowering/unary-plus-minus.cir index ffadbc3df3be..a4e254939912 100644 --- a/clang/test/CIR/Lowering/unary-plus-minus.cir +++ b/clang/test/CIR/Lowering/unary-plus-minus.cir @@ -37,8 +37,7 @@ module { %3 = cir.load %0 : cir.ptr , f64 %4 = cir.unary(minus, %3) : f64, f64 // MLIR: %[[#F_MINUS:]] = llvm.load %{{[0-9]}} : !llvm.ptr -> f64 - // MLIR: %[[#F_NEG_ONE:]] = llvm.mlir.constant(-1.000000e+00 : f64) : f64 - // MLIR: %5 = llvm.fmul %[[#F_NEG_ONE]], %[[#F_MINUS]] : f64 + // MLIR: %{{[0-9]}} = llvm.fneg %[[#F_MINUS]] : f64 cir.return } } diff --git a/clang/test/CIR/Lowering/vectype.cpp b/clang/test/CIR/Lowering/vectype.cpp new file mode 100644 index 000000000000..3557eba657e9 --- /dev/null +++ b/clang/test/CIR/Lowering/vectype.cpp @@ -0,0 +1,201 @@ +// RUN: %clang_cc1 -std=c++17 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir +// RUN: cir-opt %t.cir -cir-to-llvm -o %t.mlir +// RUN: FileCheck --input-file=%t.mlir %s + +typedef int vi4 __attribute__((vector_size(16))); +typedef double vd2 __attribute__((vector_size(16))); + +void vector_int_test(int x) { + + // Vector constant. Not yet implemented. Expected results will change when + // fully implemented. 
+ vi4 a = { 1, 2, 3, 4 }; + // CHECK: %[[#T30:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#T31:]] = llvm.mlir.constant(2 : i32) : i32 + // CHECK: %[[#T32:]] = llvm.mlir.constant(3 : i32) : i32 + // CHECK: %[[#T33:]] = llvm.mlir.constant(4 : i32) : i32 + // CHECK: %[[#T34:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T35:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T36:]] = llvm.insertelement %[[#T30]], %[[#T34]][%[[#T35]] : i64] : vector<4xi32> + // CHECK: %[[#T37:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T38:]] = llvm.insertelement %[[#T31]], %[[#T36]][%[[#T37]] : i64] : vector<4xi32> + // CHECK: %[[#T39:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T40:]] = llvm.insertelement %[[#T32]], %[[#T38]][%[[#T39]] : i64] : vector<4xi32> + // CHECK: %[[#T41:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T42:]] = llvm.insertelement %[[#T33]], %[[#T40]][%[[#T41]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#T42]], %[[#T3:]] : vector<4xi32>, !llvm.ptr + + // Non-const vector initialization. 
+ vi4 b = { x, 5, 6, x + 1 }; + // CHECK: %[[#T43:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T44:]] = llvm.mlir.constant(5 : i32) : i32 + // CHECK: %[[#T45:]] = llvm.mlir.constant(6 : i32) : i32 + // CHECK: %[[#T46:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T47:]] = llvm.mlir.constant(1 : i32) : i32 + // CHECK: %[[#T48:]] = llvm.add %[[#T46]], %[[#T47]] : i32 + // CHECK: %[[#T49:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T50:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T51:]] = llvm.insertelement %[[#T43]], %[[#T49]][%[[#T50]] : i64] : vector<4xi32> + // CHECK: %[[#T52:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T53:]] = llvm.insertelement %[[#T44]], %[[#T51]][%[[#T52]] : i64] : vector<4xi32> + // CHECK: %[[#T54:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T55:]] = llvm.insertelement %[[#T45]], %[[#T53]][%[[#T54]] : i64] : vector<4xi32> + // CHECK: %[[#T56:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T57:]] = llvm.insertelement %[[#T48]], %[[#T55]][%[[#T56]] : i64] : vector<4xi32> + // CHECK: llvm.store %[[#T57]], %[[#T5:]] : vector<4xi32>, !llvm.ptr + + // Extract element. + int c = a[x]; + // CHECK: %[[#T58:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T59:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T60:]] = llvm.extractelement %[[#T58]][%[[#T59]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#T60]], %[[#T7:]] : i32, !llvm.ptr + + // Insert element. + a[x] = x; + // CHECK: %[[#T61:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T62:]] = llvm.load %[[#T1]] : !llvm.ptr -> i32 + // CHECK: %[[#T63:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T64:]] = llvm.insertelement %[[#T61]], %[[#T63]][%[[#T62]] : i32] : vector<4xi32> + // CHECK: llvm.store %[[#T64]], %[[#T3]] : vector<4xi32>, !llvm.ptr + + // Binary arithmetic operators. 
+ vi4 d = a + b; + // CHECK: %[[#T65:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T66:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T67:]] = llvm.add %[[#T65]], %[[#T66]] : vector<4xi32> + // CHECK: llvm.store %[[#T67]], %[[#T9:]] : vector<4xi32>, !llvm.ptr + vi4 e = a - b; + // CHECK: %[[#T68:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T69:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T70:]] = llvm.sub %[[#T68]], %[[#T69]] : vector<4xi32> + // CHECK: llvm.store %[[#T70]], %[[#T11:]] : vector<4xi32>, !llvm.ptr + vi4 f = a * b; + // CHECK: %[[#T71:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T72:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T73:]] = llvm.mul %[[#T71]], %[[#T72]] : vector<4xi32> + // CHECK: llvm.store %[[#T73]], %[[#T13:]] : vector<4xi32>, !llvm.ptr + vi4 g = a / b; + // CHECK: %[[#T74:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T75:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T76:]] = llvm.sdiv %[[#T74]], %[[#T75]] : vector<4xi32> + // CHECK: llvm.store %[[#T76]], %[[#T15:]] : vector<4xi32>, !llvm.ptr + vi4 h = a % b; + // CHECK: %[[#T77:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T78:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T79:]] = llvm.srem %[[#T77]], %[[#T78]] : vector<4xi32> + // CHECK: llvm.store %[[#T79]], %[[#T17:]] : vector<4xi32>, !llvm.ptr + vi4 i = a & b; + // CHECK: %[[#T80:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T81:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T82:]] = llvm.and %[[#T80]], %[[#T81]] : vector<4xi32> + // CHECK: llvm.store %[[#T82]], %[[#T19:]] : vector<4xi32>, !llvm.ptr + vi4 j = a | b; + // CHECK: %[[#T83:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T84:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> 
+ // CHECK: %[[#T85:]] = llvm.or %[[#T83]], %[[#T84]] : vector<4xi32> + // CHECK: llvm.store %[[#T85]], %[[#T21:]] : vector<4xi32>, !llvm.ptr + vi4 k = a ^ b; + // CHECK: %[[#T86:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T87:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T88:]] = llvm.xor %[[#T86]], %[[#T87]] : vector<4xi32> + // CHECK: llvm.store %[[#T88]], %[[#T23:]] : vector<4xi32>, !llvm.ptr + + // Unary arithmetic operators. + vi4 l = +a; + // CHECK: %[[#T89:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: llvm.store %[[#T89]], %[[#T25:]] : vector<4xi32>, !llvm.ptr + vi4 m = -a; + // CHECK: %[[#T90:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T91:]] = llvm.mlir.zero : vector<4xi32> + // CHECK: %[[#T92:]] = llvm.sub %[[#T91]], %[[#T90]] : vector<4xi32> + // CHECK: llvm.store %[[#T92]], %[[#T27:]] : vector<4xi32>, !llvm.ptr + vi4 n = ~a; + // CHECK: %[[#T93:]] = llvm.load %[[#T3]] : !llvm.ptr -> vector<4xi32> + // CHECK: %[[#T94:]] = llvm.mlir.constant(-1 : i32) : i32 + // CHECK: %[[#T95:]] = llvm.mlir.undef : vector<4xi32> + // CHECK: %[[#T96:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T97:]] = llvm.insertelement %[[#T94]], %[[#T95]][%[[#T96]] : i64] : vector<4xi32> + // CHECK: %[[#T98:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T99:]] = llvm.insertelement %[[#T94]], %[[#T97]][%[[#T98]] : i64] : vector<4xi32> + // CHECK: %[[#T100:]] = llvm.mlir.constant(2 : i64) : i64 + // CHECK: %[[#T101:]] = llvm.insertelement %[[#T94]], %[[#T99]][%[[#T100]] : i64] : vector<4xi32> + // CHECK: %[[#T102:]] = llvm.mlir.constant(3 : i64) : i64 + // CHECK: %[[#T103:]] = llvm.insertelement %[[#T94]], %[[#T101]][%[[#T102]] : i64] : vector<4xi32> + // CHECK: %[[#T104:]] = llvm.xor %[[#T103]], %[[#T93]] : vector<4xi32> + // CHECK: llvm.store %[[#T104]], %[[#T29:]] : vector<4xi32>, !llvm.ptr +} + +void vector_double_test(int x, double y) { + + // Vector constant. 
Not yet implemented. Expected results will change when + // fully implemented. + vd2 a = { 1.5, 2.5 }; + // CHECK: %[[#T22:]] = llvm.mlir.constant(1.500000e+00 : f64) : f64 + // CHECK: %[[#T23:]] = llvm.mlir.constant(2.500000e+00 : f64) : f64 + // CHECK: %[[#T24:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T25:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T26:]] = llvm.insertelement %[[#T22]], %[[#T24]][%[[#T25]] : i64] : vector<2xf64> + // CHECK: %[[#T27:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T28:]] = llvm.insertelement %[[#T23]], %[[#T26]][%[[#T27]] : i64] : vector<2xf64> + // CHECK: llvm.store %[[#T28]], %[[#T5:]] : vector<2xf64>, !llvm.ptr + + // Non-const vector initialization. + vd2 b = { y, y + 1.0 }; + // CHECK: %[[#T29:]] = llvm.load %[[#T3:]] : !llvm.ptr -> f64 + // CHECK: %[[#T30:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T31:]] = llvm.mlir.constant(1.000000e+00 : f64) : f64 + // CHECK: %[[#T32:]] = llvm.fadd %[[#T30]], %[[#T31]] : f64 + // CHECK: %[[#T33:]] = llvm.mlir.undef : vector<2xf64> + // CHECK: %[[#T34:]] = llvm.mlir.constant(0 : i64) : i64 + // CHECK: %[[#T35:]] = llvm.insertelement %[[#T29]], %[[#T33]][%[[#T34]] : i64] : vector<2xf64> + // CHECK: %[[#T36:]] = llvm.mlir.constant(1 : i64) : i64 + // CHECK: %[[#T37:]] = llvm.insertelement %[[#T32]], %[[#T35]][%[[#T36]] : i64] : vector<2xf64> + // CHECK: llvm.store %[[#T37]], %[[#T7:]] : vector<2xf64>, !llvm.ptr + + // Extract element. + double c = a[x]; + // CHECK: %[[#T38:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T39:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T40:]] = llvm.extractelement %[[#T38]][%[[#T39]] : i32] : vector<2xf64> + // CHECK: llvm.store %[[#T40]], %[[#T9:]] : f64, !llvm.ptr + + // Insert element.
+ a[x] = y; + // CHECK: %[[#T41:]] = llvm.load %[[#T3]] : !llvm.ptr -> f64 + // CHECK: %[[#T42:]] = llvm.load %[[#T1:]] : !llvm.ptr -> i32 + // CHECK: %[[#T43:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T44:]] = llvm.insertelement %[[#T41]], %[[#T43]][%[[#T42]] : i32] : vector<2xf64> + // CHECK: llvm.store %[[#T44]], %[[#T5]] : vector<2xf64>, !llvm.ptr + + // Binary arithmetic operators. + vd2 d = a + b; + // CHECK: %[[#T45:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T46:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T47:]] = llvm.fadd %[[#T45]], %[[#T46]] : vector<2xf64> + // CHECK: llvm.store %[[#T47]], %[[#T11:]] : vector<2xf64>, !llvm.ptr + vd2 e = a - b; + // CHECK: %[[#T48:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T49:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T50:]] = llvm.fsub %[[#T48]], %[[#T49]] : vector<2xf64> + // CHECK: llvm.store %[[#T50]], %[[#T13:]] : vector<2xf64>, !llvm.ptr + vd2 f = a * b; + // CHECK: %[[#T51:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T52:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T53:]] = llvm.fmul %[[#T51]], %[[#T52]] : vector<2xf64> + // CHECK: llvm.store %[[#T53]], %[[#T15:]] : vector<2xf64>, !llvm.ptr + vd2 g = a / b; + // CHECK: %[[#T54:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T55:]] = llvm.load %[[#T7]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T56:]] = llvm.fdiv %[[#T54]], %[[#T55]] : vector<2xf64> + // CHECK: llvm.store %[[#T56]], %[[#T17:]] : vector<2xf64>, !llvm.ptr + + // Unary arithmetic operators. 
+ vd2 l = +a; + // CHECK: %[[#T57:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: llvm.store %[[#T57]], %[[#T19:]] : vector<2xf64>, !llvm.ptr + vd2 m = -a; + // CHECK: %[[#T58:]] = llvm.load %[[#T5]] : !llvm.ptr -> vector<2xf64> + // CHECK: %[[#T59:]] = llvm.fneg %[[#T58]] : vector<2xf64> + // CHECK: llvm.store %[[#T59]], %[[#T21:]] : vector<2xf64>, !llvm.ptr +}