
[AIE2P][AIE2] Split symmetric G_BUILD_VECTOR into two G_BUILD_VECTORs #400

Open: wants to merge 4 commits into aie-public
46 changes: 40 additions & 6 deletions llvm/lib/Target/AIE/AIECombine.td
@@ -9,6 +9,31 @@
//===----------------------------------------------------------------------===//
include "llvm/Target/GlobalISel/Combine.td"

// Explicitly listing each generic combine here gives direct visibility into,
// and control over, which generic combines are enabled.
def aie_all_combines : GICombineGroup<[trivial_combines, vector_ops_combines,
insert_vec_elt_combines, extract_vec_elt_combines, combines_for_extload,
combine_extracted_vector_load,
undef_combines, identity_combines, phi_combines,
simplify_add_to_sub, hoist_logic_op_with_same_opcode_hands, shifts_too_big,
reassocs, ptr_add_immed_chain,
shl_ashr_to_sext_inreg, sext_inreg_of_load,
width_reduction_combines, select_combines,
known_bits_simplifications, ext_ext_fold,
not_cmp_fold, opt_brcond_by_inverting_cond,
unmerge_merge, unmerge_cst, unmerge_dead_to_trunc,
unmerge_zext_to_zext, merge_unmerge, trunc_ext_fold, trunc_shift,
const_combines, xor_of_and_with_same_reg, ptr_add_with_zero,
shift_immed_chain, shift_of_shifted_logic_chain, load_or_combine,
div_rem_to_divrem, funnel_shift_combines, commute_shift,
form_bitfield_extract, constant_fold_binops, constant_fold_fma,
constant_fold_cast_op, fabs_fneg_fold,
intdiv_combines, mulh_combines, redundant_neg_operands,
and_or_disjoint_mask, fma_combines, fold_binop_into_select,
sub_add_reg, select_to_minmax, redundant_binop_in_equality,
fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
double_icmp_zero_and_or_combine, match_addos, combine_shuffle_concat]>;

// AIE-specific offset folding for G_GLOBAL_VALUE.
def combine_globalval_offset_matchdata : GIDefMatchData<"uint64_t">;
def combine_globalval_offset : GICombineRule<
@@ -34,6 +59,13 @@ def combine_extract_vector_elt_and_zsa_ext : GICombineRule<
(apply [{ applyExtractVecEltAndExt(*${root}, MRI, B, ${matchinfo}); }])
>;

def combine_symmetric_build_vector : GICombineRule<
(defs root:$root, build_fn_matchinfo:$matchinfo),
(match (wip_match_opcode G_BUILD_VECTOR): $root,
[{ return matchSymmetricBuildVector(*${root}, MRI, Observer, ${matchinfo}); }]),
(apply [{ Helper.applyBuildFnNoErase(*${root}, ${matchinfo}); }])
>;

def combine_splat_vector_matchdata: GIDefMatchData<"std::pair<Register, Register>">;
def combine_splat_vector : GICombineRule<
(defs root:$root, combine_splat_vector_matchdata:$matchinfo),
@@ -134,19 +166,20 @@ def combine_vector_shuffle_to_extract_insert_elt_to_broadcast : GICombineRule<

def AIE2PreLegalizerCombiner
: GICombiner<"AIE2PreLegalizerCombinerImpl", [ combine_unpad_vector, combine_pad_vector,
all_combines, combine_S20NarrowingOpt,
aie_all_combines, combine_S20NarrowingOpt,
combine_globalval_offset,
combine_extract_vector_elt_and_zsa_ext,
combine_splat_vector, combine_concat_to_pad_vector,
combine_single_diff_build_vector]> {
combine_single_diff_build_vector,
combine_symmetric_build_vector]> {
let CombineAllMethodName = "tryCombineAllImpl";
}

def AIE2PPreLegalizerCombiner
: GICombiner<"AIE2PPreLegalizerCombinerImpl", [ combine_unpad_vector, combine_pad_vector,
combine_vector_shuffle_to_copy,
combine_vector_shuffle_extract_subvec,
all_combines, combine_S20NarrowingOpt,
aie_all_combines, combine_S20NarrowingOpt,
combine_globalval_offset,
combine_extract_vector_elt_and_zsa_ext,
combine_splat_vector,
@@ -159,16 +192,17 @@ def AIE2PPreLegalizerCombiner
combine_vector_shuffle_to_extract_insert_elt,
combine_vector_shuffle_concat_extracted_subvectors,
combine_paired_extracts,
combine_vector_shuffle_to_extract_insert_elt_to_broadcast]> {
combine_vector_shuffle_to_extract_insert_elt_to_broadcast,
combine_symmetric_build_vector]> {
let CombineAllMethodName = "tryCombineAllImpl";
}

def AIE2PostLegalizerGenericCombiner
: GICombiner<"AIE2PostLegalizerGenericCombinerImpl", [ all_combines ]> {
: GICombiner<"AIE2PostLegalizerGenericCombinerImpl", [ aie_all_combines ]> {
}

def AIE2PPostLegalizerGenericCombiner
: GICombiner<"AIE2PPostLegalizerGenericCombinerImpl", [ all_combines ]> {
: GICombiner<"AIE2PPostLegalizerGenericCombinerImpl", [ aie_all_combines ]> {
}

def combine_extract_concat_matchdata: GIDefMatchData<"Register">;
119 changes: 93 additions & 26 deletions llvm/lib/Target/AIE/AIECombinerHelper.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
@@ -1227,6 +1228,40 @@ void llvm::applyExtractVecEltAndExt(
MatchMI->eraseFromParent();
}

static std::optional<Register>
getSplatVectorSrcReg(const MachineInstr &MI, const MachineRegisterInfo &MRI,
std::pair<unsigned, unsigned> Range) {
auto IsUndef = [&](const MachineOperand &Op) {
const MachineInstr *Undef = MRI.getVRegDef(Op.getReg());
return Undef && Undef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
};
const unsigned Start = Range.first;
const unsigned End = Range.second;
// First non-undef operand.
Register SrcReg = 0;
bool FoundSrc = false;
bool AllUndef = true;

// Find the first non-undef operand as the reference.
for (unsigned I = Start; I < End; I++) {
const MachineOperand &Op = MI.getOperand(I);
if (!IsUndef(Op)) {
if (!FoundSrc) {
SrcReg = Op.getReg();
FoundSrc = true;
} else if (Op.getReg() != SrcReg) {
return std::nullopt;
}
AllUndef = false;
}
}

// If every lane in the range is undef, the choice of source does not matter;
// fall back to the first element operand of the range.
if (AllUndef)
SrcReg = MI.getOperand(Start).getReg();

return SrcReg;
}
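
A small, hypothetical example (in the style of the MIR tests below) may help illustrate the helper; the register numbers and element count are made up for illustration only:

; Assume %0 comes from a COPY and %1 is a G_IMPLICIT_DEF.
%2:_(<8 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %0:_(s32), %1:_(s32), %0:_(s32), %0:_(s32), %1:_(s32), %0:_(s32)

For the operand range [1, 9), getSplatVectorSrcReg returns %0, because every non-undef lane reads %0. If some non-undef lane used a different register, it returns std::nullopt; if every lane in the range were undef, any source is as good as another, so it falls back to the first element operand of the range.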

// Match something like:
// %0:_(<32 x s16>) = G_BUILD_VECTOR %1:_(s16), ... x32
//
@@ -1254,34 +1289,12 @@ bool llvm::matchSplatVector(MachineInstr &MI, MachineRegisterInfo &MRI,
return false;
}

auto IsUndef = [&](const MachineOperand &Op) {
const MachineInstr *Undef = MRI.getVRegDef(Op.getReg());
return Undef && Undef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
};
const unsigned NumOps = MI.getNumOperands();
// First non-undef operand.
unsigned SrcReg = 0;
bool FoundSrc = false;
bool AllUndef = true;

// Find the first non-undef operand as the reference.
for (unsigned I = 1; I < NumOps; I++) {
const MachineOperand &Op = MI.getOperand(I);
if (!IsUndef(Op)) {
if (!FoundSrc) {
SrcReg = Op.getReg();
FoundSrc = true;
} else if (Op.getReg() != SrcReg) {
return false;
}
AllUndef = false;
}
}

if (AllUndef)
SrcReg = MI.getOperand(1).getReg();
auto SrcReg = getSplatVectorSrcReg(MI, MRI, std::make_pair(1, NumOps));
if (!SrcReg)
return false;

MatchInfo = {DstVecReg, SrcReg};
MatchInfo = {DstVecReg, *SrcReg};
return true;
}

@@ -1456,6 +1469,60 @@ bool llvm::applySingleDiffLaneBuildVector(
return true;
}

// Match something like:
// %0:_(<32 x s16>) = G_BUILD_VECTOR %1:_(s16), ... x16, %2:_(s16), ... x16
//
// To turn it into
// %3:_(<16 x s16>) = G_BUILD_VECTOR %1:_(s16), ... x16
// %4:_(<16 x s16>) = G_BUILD_VECTOR %2:_(s16), ... x16
// %0:_(<32 x s16>) = G_CONCAT_VECTORS %3:_(<16 x s16>), %4:_(<16 x s16>)
// These sub-G_BUILD_VECTOR instructions may later be combined into broadcast
// instructions by combine_splat_vector.
// TODO: Remove the original splat vector match and implement the same here.
bool llvm::matchSymmetricBuildVector(MachineInstr &MI, MachineRegisterInfo &MRI,
GISelChangeObserver &Observer,
BuildFnTy &MatchInfo) {

assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR &&
"Expected a G_BUILD_VECTOR");
const Register DstVecReg = MI.getOperand(0).getReg();
const LLT DstVecTy = MRI.getType(DstVecReg);
const unsigned DstVecSize = DstVecTy.getSizeInBits();

switch (DstVecSize) {
case 256:
case 512:
case 1024:
case 2048:
break;
default:
// Unsupported destination vector size.
return false;
}

// TODO: Split the G_BUILD_VECTOR either into 3/4 and 1/4 parts,
// or 1/4 and 3/4 parts, and then check if any part qualifies as a splat.
const unsigned NumOps = MI.getNumOperands();
// Operand 0 is the destination, so the element sources are operands [1, NumOps).
// HalfNumElts is the operand index at which the second half begins.
const unsigned HalfNumElts = NumOps / 2 + 1;
auto FirstHalfSrcReg =
getSplatVectorSrcReg(MI, MRI, std::make_pair(1, HalfNumElts));
auto SecondHalfSrcReg =
getSplatVectorSrcReg(MI, MRI, std::make_pair(HalfNumElts, NumOps));

MatchInfo = [&MI, &Observer, DstVecTy](MachineIRBuilder &B) {
B.setInstrAndDebugLoc(MI);
LegalizerHelper Helper(B.getMF(), Observer, B);
// Splits the G_BUILD_VECTOR into two half-sized G_BUILD_VECTOR operations
// and then emits a G_CONCAT_VECTORS to combine them into the final vector.
Helper.fewerElementsVector(
MI, 0,
DstVecTy.changeElementCount(
DstVecTy.getElementCount().divideCoefficientBy(2)));
};

return (FirstHalfSrcReg.has_value() || SecondHalfSrcReg.has_value());
}
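
At a glance, the rewrite this enables looks roughly as follows. This is a sketch with made-up register numbers, written in the style of the updated MIR tests; the exact output depends on which later combines fire:

; Input: the first half is an arbitrary mix, the second half is a splat of %4.
%5:_(<8 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32), %4:_(s32), %4:_(s32), %4:_(s32), %4:_(s32)

; After the split performed by the BuildFn (fewerElementsVector):
%6:_(<4 x s32>) = G_BUILD_VECTOR %0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32)
%7:_(<4 x s32>) = G_BUILD_VECTOR %4:_(s32), %4:_(s32), %4:_(s32), %4:_(s32)
%5:_(<8 x s32>) = G_CONCAT_VECTORS %6:_(<4 x s32>), %7:_(<4 x s32>)

; After combine_splat_vector later rewrites the splat half (as in the updated tests):
%8:_(<16 x s32>) = G_AIE_BROADCAST_VECTOR %4:_(s32)
%7:_(<4 x s32>) = G_AIE_UNPAD_VECTOR %8:_(<16 x s32>)

Note that the match only requires one of the two halves to be a splat; the apply step always splits at the midpoint and leaves the splat-to-broadcast rewrite to combine_splat_vector.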

// Match something like:
// %0(<4 x s32>), dead %1(<4 x s32>), dead %2(<4 x s32>), dead %3(<4 x s32>)
// = G_UNMERGE_VALUES %10(<16 x s32>)
4 changes: 4 additions & 0 deletions llvm/lib/Target/AIE/AIECombinerHelper.h
@@ -215,6 +215,10 @@ bool applySingleDiffLaneBuildVector(
MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
AIESingleDiffLaneBuildVectorMatchData &MatchInfo);

bool matchSymmetricBuildVector(MachineInstr &MI, MachineRegisterInfo &MRI,
GISelChangeObserver &Observer,
BuildFnTy &MatchInfo);

bool matchUnpadVector(MachineInstr &MI, MachineRegisterInfo &MRI,
const AIEBaseInstrInfo &TII);
void applyUnpadVector(MachineInstr &MI, MachineRegisterInfo &MRI,
@@ -50,8 +50,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV2]](s32), [[UV1]](s32), [[UV3]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK-NEXT: $wl0 = COPY [[BUILD_VECTOR]](<8 x s32>)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV2]](s32), [[UV1]](s32), [[UV3]](s32)
; CHECK-NEXT: [[AIE_BROADCAST_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_AIE_BROADCAST_VECTOR [[DEF]](s32)
; CHECK-NEXT: [[AIE_UNPAD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_AIE_UNPAD_VECTOR [[AIE_BROADCAST_VECTOR]](<16 x s32>)
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[AIE_UNPAD_VECTOR]](<4 x s32>)
; CHECK-NEXT: $wl0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
%10:_(<4 x s32>) = COPY $q0
%0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %10
%4:_(s32) = G_IMPLICIT_DEF
@@ -68,8 +71,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK-NEXT: $wl0 = COPY [[BUILD_VECTOR]](<8 x s32>)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV1]](s32), [[UV2]](s32), [[DEF]](s32)
; CHECK-NEXT: [[AIE_BROADCAST_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_AIE_BROADCAST_VECTOR [[DEF]](s32)
; CHECK-NEXT: [[AIE_UNPAD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_AIE_UNPAD_VECTOR [[AIE_BROADCAST_VECTOR]](<16 x s32>)
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[AIE_UNPAD_VECTOR]](<4 x s32>)
; CHECK-NEXT: $wl0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
%10:_(<4 x s32>) = COPY $q0
%0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %10
%4:_(s32) = G_IMPLICIT_DEF
@@ -86,8 +92,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV2]](s32), [[UV1]](s32), [[UV3]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
; CHECK-NEXT: $wl0 = COPY [[BUILD_VECTOR]](<8 x s32>)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV2]](s32), [[UV1]](s32), [[UV3]](s32)
; CHECK-NEXT: [[AIE_BROADCAST_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_AIE_BROADCAST_VECTOR [[C]](s32)
; CHECK-NEXT: [[AIE_UNPAD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_AIE_UNPAD_VECTOR [[AIE_BROADCAST_VECTOR]](<16 x s32>)
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<4 x s32>), [[AIE_UNPAD_VECTOR]](<4 x s32>)
; CHECK-NEXT: $wl0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
%10:_(<4 x s32>) = COPY $q0
%0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %10
%4:_(s32) = G_CONSTANT i32 1
@@ -104,8 +113,11 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(<8 x s32>) = COPY $wh0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<8 x s32>)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV2]](s32), [[UV1]](s32), [[UV3]](s32), [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
; CHECK-NEXT: $x0 = COPY [[BUILD_VECTOR]](<16 x s32>)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[UV]](s32), [[UV2]](s32), [[UV1]](s32), [[UV3]](s32), [[UV4]](s32), [[UV5]](s32), [[UV6]](s32), [[UV7]](s32)
; CHECK-NEXT: [[AIE_BROADCAST_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_AIE_BROADCAST_VECTOR [[DEF]](s32)
; CHECK-NEXT: [[AIE_UNPAD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_AIE_UNPAD_VECTOR [[AIE_BROADCAST_VECTOR]](<16 x s32>)
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<8 x s32>), [[AIE_UNPAD_VECTOR]](<8 x s32>)
; CHECK-NEXT: $x0 = COPY [[CONCAT_VECTORS]](<16 x s32>)
%10:_(<8 x s32>) = COPY $wh0
%0:_(s32), %1:_(s32), %2:_(s32), %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %10
%8:_(s32) = G_IMPLICIT_DEF
@@ -156,11 +156,11 @@ body: |
; CHECK-LABEL: name: test_build_vector_256_32bit_scl_invalid
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $r0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $r1
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY1]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32)
; CHECK-NEXT: $wl0 = COPY [[BUILD_VECTOR]](<8 x s32>)
%1:_(s32) = COPY $r0
%2:_(s32) = COPY $r1
%3:_(<8 x s32>) = G_BUILD_VECTOR %2:_(s32), %1:_(s32), %1:_(s32), %2:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32)
%3:_(<8 x s32>) = G_BUILD_VECTOR %2:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %2:_(s32), %1:_(s32)
$wl0 = COPY %3:_(<8 x s32>)
...

@@ -229,11 +229,11 @@ body: |
; CHECK-LABEL: name: test_build_vector_512_32bit_scl_invalid
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $r0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $r1
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32)
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY]](s32), [[COPY1]](s32), [[COPY]](s32), [[COPY]](s32)
; CHECK-NEXT: $x0 = COPY [[BUILD_VECTOR]](<16 x s32>)
%1:_(s32) = COPY $r0
%2:_(s32) = COPY $r1
%3:_(<16 x s32>) = G_BUILD_VECTOR %1:_(s32), %2:_(s32), %1:_(s32), %2:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32)
%3:_(<16 x s32>) = G_BUILD_VECTOR %1:_(s32), %1:_(s32), %1:_(s32), %2:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %1:_(s32), %2:_(s32), %1:_(s32), %1:_(s32)
$x0 = COPY %3:_(<16 x s32>)
...
