//===--- LoweringPrepareArm64CXXABI.cpp - Arm64 ABI specific code --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides ARM64 C++ ABI specific code that is used during the
// LLVM IR lowering prepare phase.
//
//===----------------------------------------------------------------------===//

#include "../IR/MissingFeatures.h"
#include "LoweringPrepareItaniumCXXABI.h"

#include <cassert>

using cir::LoweringPrepareCXXABI;
using cir::MissingFeatures;

using namespace mlir;
using namespace mlir::cir;

namespace {
class LoweringPrepareAArch64CXXABI : public LoweringPrepareItaniumCXXABI {
public:
  LoweringPrepareAArch64CXXABI(clang::TargetCXXABI::Kind k) : Kind(k) {}
  mlir::Value lowerVAArg(CIRBaseBuilderTy &builder,
                         mlir::cir::VAArgOp op) override;

private:
  clang::TargetCXXABI::Kind Kind;
  mlir::Value lowerGenericAArch64VAArg(CIRBaseBuilderTy &builder,
                                       mlir::cir::VAArgOp op);
};
} // namespace

LoweringPrepareCXXABI *
LoweringPrepareCXXABI::createAArch64ABI(clang::TargetCXXABI::Kind k) {
  return new LoweringPrepareAArch64CXXABI(k);
}
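
// A sketch of how this factory is expected to be used (hypothetical call
// site; the actual wiring lives in the LoweringPrepare pass, and the
// `cxxABI` and `vaArgOp` names here are illustrative only):
//
//   cxxABI.reset(LoweringPrepareCXXABI::createAArch64ABI(
//       clang::TargetCXXABI::GenericAArch64));
//   mlir::Value res = cxxABI->lowerVAArg(builder, vaArgOp);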

mlir::Value LoweringPrepareAArch64CXXABI::lowerGenericAArch64VAArg(
    CIRBaseBuilderTy &builder, mlir::cir::VAArgOp op) {
  auto loc = op->getLoc();
  auto valist = op->getOperand(0);
  auto opResTy = op.getType();
  // The front end should not produce VAArgOp with a non-scalar result type.
  bool isSupportedType =
      opResTy.isa<mlir::cir::IntType, mlir::cir::SingleType,
                  mlir::cir::PointerType, mlir::cir::BoolType,
                  mlir::cir::DoubleType>();
  assert(isSupportedType && "unsupported type for AArch64 va_arg lowering");
  (void)isSupportedType; // Silence unused-variable warnings in NDEBUG builds.

  assert(!MissingFeatures::handleBigEndian());

  bool isFloatingType =
      opResTy.isa<mlir::cir::SingleType, mlir::cir::DoubleType>();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };
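  //
  // __gr_offs and __vr_offs hold the (negative) byte offset from __gr_top /
  // __vr_top down to the next unallocated register save slot; once they reach
  // a non-negative value, the corresponding register class is exhausted and
  // further arguments live on the stack. As a rough sketch of the control
  // flow built below (for an integer argument; not the verbatim output of
  // this lowering):
  //
  //   if (gr_offs >= 0) goto on_stack;    // registers already exhausted
  //   new_offs = gr_offs + 8;             // consume one 8-byte GP slot
  //   gr_offs = new_offs;
  //   if (new_offs <= 0) goto in_reg;     // the slot was still available
  //   goto on_stack;
  // in_reg:
  //   result = gr_top + old_gr_offs;      // address in register save area
  // on_stack:
  //   result = stack; stack += 8;         // address on the stack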
  auto curInsertionP = builder.saveInsertionPoint();
  auto currentBlock = builder.getInsertionBlock();
  auto boolTy = builder.getBoolTy();

  auto maybeRegBlock = builder.createBlock(builder.getBlock()->getParent());
  auto inRegBlock = builder.createBlock(builder.getBlock()->getParent());
  auto onStackBlock = builder.createBlock(builder.getBlock()->getParent());
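
  // Control flow: the current block branches to either maybeRegBlock (a
  // register slot may still be available) or onStackBlock; maybeRegBlock
  // then resolves to inRegBlock or onStackBlock, and both of those forward
  // the computed argument address to the end block as a block argument.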

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If v/gr_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  builder.restoreInsertionPoint(curInsertionP);
  // 3 is the field number of __gr_offs, 4 is the field number of __vr_offs
  auto offsP = builder.createGetMemberOp(loc, valist,
                                         isFloatingType ? "vr_offs" : "gr_offs",
                                         isFloatingType ? 4 : 3);
  auto offs = builder.create<mlir::cir::LoadOp>(loc, offsP);
  auto zeroValue = builder.create<mlir::cir::ConstantOp>(
      loc, offs.getType(), mlir::cir::IntAttr::get(offs.getType(), 0));
  auto cmpRes = builder.create<mlir::cir::CmpOp>(loc, boolTy, CmpOpKind::ge,
                                                 offs, zeroValue);
  builder.create<mlir::cir::BrCondOp>(loc, cmpRes, onStackBlock, maybeRegBlock);
  auto newEndBlock = currentBlock->splitBlock(op);
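  // splitBlock moves the VAArgOp and everything after it into newEndBlock;
  // the conditional branch created above remains the terminator of the
  // original block.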

  // maybeRegBlock updates the gr_offs/vr_offs pointer for the next call to
  // va_arg on this va_list. The fact that this is done unconditionally
  // reflects the fact that allocating an argument to the stack also uses up
  // all the remaining registers of the appropriate kind.
  builder.setInsertionPointToEnd(maybeRegBlock);
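  // Per AAPCS64, each general-purpose register save slot is 8 bytes and each
  // FP/SIMD register save slot is 16 bytes, hence the 8/16 increment below.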
  auto boundaryValue = builder.create<mlir::cir::ConstantOp>(
      loc, offs.getType(),
      mlir::cir::IntAttr::get(offs.getType(), isFloatingType ? 16 : 8));
  auto newRegsOffs = builder.create<mlir::cir::BinOp>(
      loc, offs.getType(), mlir::cir::BinOpKind::Add, offs, boundaryValue);
  builder.createStore(loc, newRegsOffs, offsP);
  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  auto maybeRegCmpRes = builder.create<mlir::cir::CmpOp>(
      loc, boolTy, CmpOpKind::le, newRegsOffs, zeroValue);
  builder.create<mlir::cir::BrCondOp>(loc, maybeRegCmpRes, inRegBlock,
                                      onStackBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  builder.setInsertionPointToEnd(onStackBlock);
  auto stackP = builder.createGetMemberOp(loc, valist, "stack", 0);
  auto stack = builder.create<mlir::cir::LoadOp>(loc, stackP);
  auto ptrDiffTy =
      mlir::cir::IntType::get(builder.getContext(), 64, /*signed=*/false);
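  // Stack argument slots are 8-byte aligned on AArch64, and only scalar types
  // of at most 8 bytes reach this lowering (see the assert above), so
  // advancing __stack by a fixed 8 bytes is what this code assumes suffices.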
  auto eight = builder.create<mlir::cir::ConstantOp>(
      loc, ptrDiffTy, mlir::cir::IntAttr::get(ptrDiffTy, 8));
  auto i8Ty = IntegerType::get(builder.getContext(), 8);
  auto i8PtrTy = PointerType::get(builder.getContext(), i8Ty);
  auto castStack = builder.createBitcast(stack, i8PtrTy);
  // Write the new value of __stack for the next call to va_arg.
  auto newStackAsi8Ptr = builder.create<mlir::cir::PtrStrideOp>(
      loc, castStack.getType(), castStack, eight);
  auto newStack = builder.createBitcast(newStackAsi8Ptr, stack.getType());
  builder.createStore(loc, newStack, stackP);
  builder.create<mlir::cir::BrOp>(loc, mlir::ValueRange{stack}, newEndBlock);

  //=======================================
  // Argument was in registers
  //=======================================
  // Now emit the code for the case where the argument was originally passed
  // in registers, starting with the appropriate block:
  builder.setInsertionPointToEnd(inRegBlock);
  auto regTopP = builder.createGetMemberOp(loc, valist,
                                           isFloatingType ? "vr_top" : "gr_top",
                                           isFloatingType ? 2 : 1);
  auto regTop = builder.create<mlir::cir::LoadOp>(loc, regTopP);
  auto castRegTop = builder.createBitcast(regTop, i8PtrTy);
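  // At this point offs still holds the pre-increment (negative) offset, so
  // stepping from reg_top by offs points back into the register save area at
  // the slot assigned to this argument.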
  auto resAsInt8P = builder.create<mlir::cir::PtrStrideOp>(
      loc, castRegTop.getType(), castRegTop, offs);
  auto resAsVoidP = builder.createBitcast(resAsInt8P, regTop.getType());
  builder.create<mlir::cir::BrOp>(loc, mlir::ValueRange{resAsVoidP},
                                  newEndBlock);

  // Generate the remaining instructions in the merge (end) block.
  builder.setInsertionPoint(op);
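  // The end block receives the argument's address as a block argument, fed by
  // the branches from both the on-stack and in-register paths.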
  newEndBlock->addArgument(stack.getType(), loc);
  auto resP = newEndBlock->getArgument(0);
  assert(resP.getType().isa<mlir::cir::PointerType>());
  auto opResPTy = PointerType::get(builder.getContext(), opResTy);
  auto castResP = builder.createBitcast(resP, opResPTy);
  auto res = builder.create<mlir::cir::LoadOp>(loc, castResP);
  return res.getResult();
}

mlir::Value LoweringPrepareAArch64CXXABI::lowerVAArg(CIRBaseBuilderTy &builder,
                                                     mlir::cir::VAArgOp op) {
  if (Kind == clang::TargetCXXABI::GenericAArch64) {
    return lowerGenericAArch64VAArg(builder, op);
  }
  // Return an empty value here so CIR lowering doesn't do anything with this
  // op. That leaves the responsibility of handling cir.va_arg to LLVM
  // lowering, which can be preferable when the lowering algorithm is simple.
  return mlir::Value();
}