//===-- AIEBaseInstrInfo.h - Common AIE InstrInfo ---------------*- C++ -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
// (c) Copyright 2023-2025 Advanced Micro Devices, Inc. or its affiliates
//
//===----------------------------------------------------------------------===//
//
// This file contains common TargetInstrInfo code between AIE versions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AIE_AIEBASEINSTRINFO_H
#define LLVM_LIB_TARGET_AIE_AIEBASEINSTRINFO_H
#include "AIE.h"
#include "AIEMIRFormatter.h"
#include "AIETiedRegOperands.h"
#include "MCTargetDesc/AIEFormat.h"
#include "MCTargetDesc/AIEMCFormats.h"
#include "llvm/CodeGen/ResourceCycle.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <optional>
namespace llvm {
struct AIEBaseInstrInfo : public TargetInstrInfo {
using TargetInstrInfo::TargetInstrInfo;
// This codifies the model of ZeroOverheadLoops
class ZOLSupport {
public:
// Pseudo opcodes.
// LoopStart defines the iteration count.
// It has an explicit second operand that represents the adjustment
// that software pipelining (SWP) applies.
unsigned LoopStartOpcode;
// LoopEnd holds the branch target and the address of the last bundle.
unsigned LoopEndOpcode;
// Opcodes and registers to use when lowering the above pseudos
// SetLoopCount has the same operands as LoopStart.
unsigned SetLoopCountOpcode;
Register LCRegister;
// SetAddress takes an address and writes it to a loop register
unsigned SetAddressOpcode;
Register LSRegister;
Register LERegister;
// The distance between setup and the start of the loop, in units
// of bundles.
unsigned LoopSetupDistance;
};
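/// Example: a hypothetical target could describe its ZOL model by
/// overriding \ref getZOLSupport below; all opcode and register names here
/// are illustrative, not actual target definitions:
/// \code
///   std::optional<ZOLSupport> getZOLSupport() const override {
///     ZOLSupport S;
///     S.LoopStartOpcode = MyTgt::PseudoLoopStart;
///     S.LoopEndOpcode = MyTgt::PseudoLoopEnd;
///     S.SetLoopCountOpcode = MyTgt::MovToLC;
///     S.LCRegister = MyTgt::LC;
///     S.SetAddressOpcode = MyTgt::MovToLoopAddrReg;
///     S.LSRegister = MyTgt::LS;
///     S.LERegister = MyTgt::LE;
///     S.LoopSetupDistance = 7; // bundles between setup and loop start
///     return S;
///   }
/// \endcode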
/// Return the opcode for a return instruction
virtual unsigned getReturnOpcode() const {
llvm_unreachable("Target didn't implement getReturnOpcode");
}
/// Return the opcode for a call instruction
/// \param CallerF The function that makes the call
/// \param IsIndirect Select function pointer call or direct call
/// \param IsTailCall Select a tail call variant.
virtual unsigned getCallOpcode(const MachineFunction &CallerF,
bool IsIndirect, bool IsTailCall) const {
llvm_unreachable("Target didn't implement getCallOpcode");
}
/// Return the kind of slot that this instruction can be executed in.
/// This is used to direct the packetization of simple instructions.
/// NOTE: If this is called on a Composite Instruction (i.e. an instruction
/// defining a Packet format, possibly owning multiple slots), the returned
/// slot will be the default one (unknown).
MCSlotKind getSlotKind(unsigned Opcode) const;
virtual const MCSlotInfo *getSlotInfo(const MCSlotKind Kind) const;
/// \return Opcode of multi-slot pseudo \p MI that runs in \p Slot
std::optional<unsigned> getSlotOpcode(const MCSlotKind Slot,
const MachineInstr &MI) const;
/// \return whether \p MI is a multi-slot pseudo instruction
bool isMultiSlotPseudo(const MachineInstr &MI) const;
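/// A minimal usage sketch for the two hooks above, assuming a packetizer
/// that has already picked a slot \c Slot for \c MI:
/// \code
///   if (TII->isMultiSlotPseudo(MI))
///     if (std::optional<unsigned> Opc = TII->getSlotOpcode(Slot, MI))
///       MI.setDesc(TII->get(*Opc));
/// \endcode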
/// Return the Packet formats for this target
virtual const PacketFormats &getPacketFormats() const;
/// Return a nop of the given byte size, or the smallest if zero.
virtual unsigned getNopOpcode(size_t Size = 0) const {
llvm_unreachable("Target didn't implement getNopOpcode");
}
/// Return an opcode that reverses the branch condition of a given
/// instruction
/// \param Opc Opcode of the branch to reverse
/// \pre Opc must be a conditional branch
virtual unsigned getOppositeBranchOpcode(unsigned Opc) const {
llvm_unreachable("Target didn't implement getOppositeBranchOpcode");
}
/// Return the opcode of an unconditional jump
virtual unsigned getJumpOpcode() const {
llvm_unreachable("Target didn't implement getJumpOpcode");
}
/// Return Multi-Slot Pseudo opcode based on Reg type and imm. size
virtual unsigned getConstantMovOpcode(MachineRegisterInfo &MRI,
unsigned int Reg, APInt &Val) const {
llvm_unreachable("Target didn't implement getConstantMovOpcode");
}
/// Return Multi-Slot Pseudo opcode based on Reg type
virtual unsigned getScalarMovOpcode(Register DstReg, Register SrcReg) const {
llvm_unreachable("Target didn't implement getScalarMovOpcode");
}
/// Return the MOV opcode
virtual unsigned getMvSclOpcode() const {
llvm_unreachable("Target didn't implement getMvSclOpcode");
}
virtual unsigned getAddrIntrinsic2D() const {
llvm_unreachable("Target didn't implement getAddrIntrinsic2D");
}
virtual unsigned getAddrIntrinsic3D() const {
llvm_unreachable("Target didn't implement getAddrIntrinsic3D");
}
virtual unsigned getPtrAdd2DOpcode() const {
llvm_unreachable("Target didn't implement getPtrAdd2DOpcode");
}
virtual unsigned getPtrAdd3DOpcode() const {
llvm_unreachable("Target didn't implement getPtrAdd3DOpcode");
}
/// Return the MultiSlotPseudo MOV opcode
virtual unsigned getMvSclMultiSlotPseudoOpcode() const {
llvm_unreachable("Target didn't implement getMvSclMultiSlotPseudoOpcode");
}
/// Return the 3-address integer ADD opcode
virtual unsigned getAddSclOpcode() const {
llvm_unreachable("Target didn't implement getAddSclOpcode");
}
/// Returns the opcode for CYCLE_SEPARATOR meta instruction.
/// Used for debugging purposes
virtual unsigned getCycleSeparatorOpcode() const {
llvm_unreachable("Target didn't implement getCycleSeparatorOpcode");
}
/// Return the opcode to be used for pushing a vector element at the MSB
/// position in a vector
virtual unsigned getGenericAddVectorEltOpcode() const {
llvm_unreachable("Target didn't implement getGenericAddVectorEltOpcode");
}
/// Return the opcode to be used for inserting a vector element at an
/// arbitrary position in a vector
virtual unsigned getGenericInsertVectorEltOpcode() const {
llvm_unreachable("Target didn't implement getGenericInsertVectorEltOpcode");
}
/// Return the opcode to be used for extracting a vector element
/// \param SignExt True if the extracted element shall be sign extended
virtual unsigned getGenericExtractVectorEltOpcode(bool SignExt) const {
llvm_unreachable(
"Target didn't implement getGenericExtractVectorEltOpcode");
}
/// Return the opcode to be used for padding undefined values in the high bits
/// of a vector
virtual unsigned getGenericPadVectorOpcode() const {
llvm_unreachable("Target didn't implement getGenericPadVectorOpcode");
}
/// Return the opcode to be used for extracting a smaller vector by ignoring
/// the high bits
virtual unsigned getGenericUnpadVectorOpcode() const {
llvm_unreachable("Target didn't implement getGenericUnpadVectorOpcode");
}
virtual unsigned getGenericBroadcastVectorOpcode() const {
llvm_unreachable(
"Target didn't implement getGenericBroadcastVectorOpcode!");
}
/// Return the opcode to be used for select between the words of two vectors.
virtual unsigned getGenericVSelOpcode() const {
llvm_unreachable("Target didn't implement getGenericVSelOpcode!");
}
virtual unsigned getGenericVShiftOpcode() const {
llvm_unreachable("Target didn't implement getGenericVShiftOpcode!");
}
virtual unsigned getGenericShuffleVectorOpcode() const {
llvm_unreachable("Target didn't implement getGenericShuffleVectorOpcode!");
}
/// Return the opcode to be used for subvector extraction.
virtual unsigned getGenericExtractSubvectorOpcode() const {
llvm_unreachable(
"Target didn't implement getGenericExtractSubvectorOpcode!");
}
/// Check whether Opc represents a lock instruction
virtual bool isLock(unsigned Opc) const { return false; }
/// Return an optional latency if Opc is DONE.
virtual std::optional<unsigned> getDoneLatency(const unsigned Opc) const {
return std::nullopt;
}
/// Get "implicit" latency for special instructions.
/// This is basically an extra latency, implicit to a special instruction like
/// "DONE", that we would like to give to the exit edge.
virtual unsigned getImplicitLatency(const MachineInstr &MI) const {
if (auto OptLatency = getDoneLatency(MI.getOpcode()))
return *OptLatency;
return 0;
}
/// Check whether this is a delayed scheduling barrier induced from
/// a preceding instruction with delay slots.
virtual bool isDelayedSchedBarrier(const MachineInstr &) const {
return false;
}
/// Check whether this is a scheduling barrier
virtual bool isSchedBarrier(const MachineInstr &) const { return false; }
/// Returns the number of delay slots that this instruction requires.
/// This might be 0
virtual unsigned
getNumDelaySlots(const MachineInstr &MI,
MachineInstr::QueryType Query =
MachineInstr::QueryType::AnyInBundle) const;
/// Returns the number of delay slots that should be reserved, i.e.
/// not filled in by the scheduler.
virtual unsigned getNumReservedDelaySlots(const MachineInstr &MI) const;
/// Check whether Opc represents a JNZ instruction. This is mainly for
/// detecting a downcounting loop branch.
virtual bool isJNZ(unsigned Opc) const { return false; }
/// Check whether Opc represents a JZ instruction.
virtual bool isJZ(unsigned Opc) const { return false; }
/// Check whether Opc represents a JL/JAL instruction.
virtual bool isCall(unsigned Opc) const { return false; }
// Detect instructions that induce control flow to unknown targets,
// in particular after pseudo expansion, where they are no longer
// terminators
virtual bool jumpsToUnknown(unsigned Opcode) const { return false; }
/// Check whether Opc represents an integer constant.
/// Signature should be Reg <- (Imm)
virtual bool isIConst(unsigned Opc) const { return false; }
/// Check whether Opc represents an instruction that doesn't change the
/// boolean result.
/// Signature should be Reg <- (Reg, ...)
virtual bool isBooleanNoOp(unsigned Opc) const {
return Opc == TargetOpcode::COPY;
}
/// Check whether Opc represent an instruction that inverts a condition
/// Signature should be Reg <- (Reg, ...)
virtual bool isBooleanNot(unsigned Opc) const { return false; }
/// Check whether MI is an increment with a constant amount.
/// Signature should be Reg <- (Reg, ...)
/// If it returns true, Step holds the amount.
virtual bool isConstStep(const MachineInstr &MI, int64_t &Step) const {
return false;
}
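/// For example, \ref isConstStep can recognize a down-counting induction
/// update (sketch):
/// \code
///   int64_t Step;
///   if (TII->isConstStep(MI, Step) && Step == -1) {
///     // MI decrements its source register by one each iteration.
///   }
/// \endcode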
virtual bool isGenericOffsetMemOpcode(unsigned Opcode) const { return false; }
// Used for Load/Store combiners
virtual unsigned getOffsetMemOpcode(unsigned BaseMemOpcode) const {
llvm_unreachable("Target didn't implement getOffsetMemOpcode");
}
virtual std::optional<unsigned>
getCombinedPostIncOpcode(MachineInstr &BaseMemI, MachineInstr &PtrAddI,
TypeSize Size) const {
llvm_unreachable("Target didn't implement getCombinedPostIncOpcode");
}
/// Check whether Opcode is a VST.PUSH.CONV
virtual bool isFifoStoreConvOpcode(unsigned Opcode) const { return false; }
/// \return Corresponding VST.FLUSH.CONV Opcode based on \a VST.FLUSH Opcode
virtual std::optional<unsigned>
getStoreFlushConvOpcode(unsigned StoreFlushOpcode) const {
llvm_unreachable("Target didn't implement getStoreFlushConvOpcode!");
}
/// \return the AIE2p opcode based on the intrinsic ID of \p MI
virtual unsigned getOpCode(MachineInstr &MI) const {
llvm_unreachable("Target didn't implement getOpCode");
}
virtual Register getVaddSignControlRegister() const {
llvm_unreachable("Target didn't implement vaddSign control register");
}
// Opcodes related to hardware loop handling
virtual bool isHardwareLoopDec(unsigned Opcode) const { return false; }
virtual bool isHardwareLoopJNZ(unsigned Opcode) const { return false; }
virtual bool isHardwareLoopStart(unsigned Opcode) const;
virtual bool isHardwareLoopEnd(unsigned Opcode) const;
// All opcodes etc. used for ZOL lowering. If this returns std::nullopt, we have no
// ZOL support.
virtual std::optional<ZOLSupport> getZOLSupport() const { return {}; }
/// Check whether \p MI defines the ZOL tripcount. If this returns true, \p MI
/// should be suitable for calling adjustTripCount on it.
/// If \p Pristine is set, we check that it wasn't updated before.
virtual bool isZOLTripCountDef(const MachineInstr &MI,
bool Pristine = false) const;
/// Lower the tripcount defined by MI with Update, which is a small
/// negative integer that should be added to the tripcount
/// \pre isZOLTripCountDef(MI)
virtual void adjustTripCount(MachineInstr &MI, int Update) const;
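/// For instance, after software pipelining has peeled \c N iterations into
/// a prologue, the remaining trip count could be lowered with the two hooks
/// above (sketch):
/// \code
///   if (TII->isZOLTripCountDef(DefMI, /*Pristine=*/true))
///     TII->adjustTripCount(DefMI, -N);
/// \endcode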
/// Check whether this is a zero-overhead loop start block
virtual bool isZeroOverheadLoopSetupInstr(const MachineInstr &) const;
// Return the number of fully-expanded 128-bit instructions, i.e. the
// distance that needs to be maintained between writing the zero-overhead
// loop registers (lc, le, ls, etc.) and the end of the loop.
virtual unsigned getLoopSetupDistance() const;
virtual unsigned getZOLBundlesCount(const MachineBasicBlock &MBB) const;
bool isZOLBody(const MachineBasicBlock &MBB) const;
// Return the vector of Alignment Region Boundaries.
virtual std::vector<MachineBasicBlock::iterator>
getAlignmentBoundaries(MachineBasicBlock &MBB) const;
virtual unsigned getPseudoJNZDOpcode() const {
llvm_unreachable("Need to implement this hook for hardware loop support.");
}
/// Return the opcode of a scalar move
virtual unsigned getPseudoMoveOpcode() const {
llvm_unreachable("Target didn't implement getPseudoMoveOpcode!");
}
/// get stream read status register
virtual Register getSSStatusReg() const {
llvm_unreachable("Target didn't implement getSSStatusReg");
}
/// get stream write status register
virtual Register getMSStatusReg() const {
llvm_unreachable("Target didn't implement getMSStatusReg");
}
virtual unsigned getMvScl2MS(unsigned ConstTLastVal) const {
llvm_unreachable("Target didn't implement getMvScl2MS");
}
virtual unsigned getMvNBScl2MS(unsigned ConstTLastVal) const {
llvm_unreachable("Target didn't implement getMvNBScl2MS");
}
virtual unsigned getMvScl2MSTlastRegOpcode() const {
llvm_unreachable("Target didn't implement getMvScl2MSTlastRegOpcode");
}
virtual unsigned getMvNBScl2MSTlastRegOpcode() const {
llvm_unreachable("Target didn't implement getMvNBScl2MSTlastRegOpcode");
}
virtual Register getPackSignCReg() const {
llvm_unreachable("Target didn't implement getPackSignCReg");
}
virtual Register getUnpackSignCReg() const {
llvm_unreachable("Target didn't implement getUnpackSignCReg");
}
/// Information about tied operands which cannot be modeled using TableGen
/// constraints.
virtual SmallVector<TiedRegOperands, 4>
getTiedRegInfo(unsigned Opcode) const {
return {};
}
SmallVector<TiedRegOperands, 4> getTiedRegInfo(const MachineInstr &MI) const {
return getTiedRegInfo(MI.getOpcode());
}
/// Information about tied operands that can be split.
virtual TiedRegOperands getTiedRegInfoForSplitting(unsigned Opcode) const {
return {};
}
/// Finds the opcode that is equivalent to \p Opcode except some operands
/// are expanded into multiple sub-register operands to facilitate register
/// allocation.
/// E.g. for AIE2: PADDA_2D -> PADDA_2D_split
virtual std::optional<unsigned>
getOpcodeWithAtomicOperands(unsigned Opcode) const {
return {};
}
/// Finds the opcode that is equivalent to \p Opcode except some sub-register
/// operands are rewritten into a larger super-register to facilitate later
/// phases of compilation like instruction encoding.
/// This is the inverse operation for \ref getOpcodeWithAtomicOperands.
/// E.g. for AIE2: PADDA_2D_split -> PADDA_2D
virtual std::optional<unsigned>
getOpcodeWithTupleOperands(unsigned Opcode) const {
return {};
}
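/// For a consistent target mapping, the two hooks above are inverses of
/// each other (sketch):
/// \code
///   if (std::optional<unsigned> Split = TII->getOpcodeWithAtomicOperands(Opc))
///     assert(TII->getOpcodeWithTupleOperands(*Split) == Opc);
/// \endcode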
struct PseudoBranchExpandInfo {
unsigned TargetInstrOpCode;
unsigned BarrierOpCode;
};
virtual std::optional<PseudoBranchExpandInfo>
getPseudoBranchExpandInfo(const MachineInstr &MI) const;
// Shared code
void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
bool
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
const DebugLoc &dl,
int *BytesAdded = nullptr) const override;
unsigned removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved = nullptr) const override;
bool isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const override;
std::optional<unsigned> getOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const override;
// Check if the MII points to a BUNDLE which contains a call instruction
bool isCallBundle(MachineBasicBlock::iterator MII) const;
// Check if the MII points to a BUNDLE which contains an instruction
// setting LE Reg
bool isZOLSetupBundle(MachineBasicBlock::iterator MII) const;
bool isLastZOLSetupBundleInMBB(MachineBasicBlock::iterator MII) const;
/// Central place to compute RAW/WAR/WAW operand latencies.
/// This uses itineraries when they exist. It returns std::nullopt for
/// instructions that are not described.
/// Note that the latency can be negative, e.g. for AIE2:
/// ST r0 --anti(-6)--> LD r0
/// or
/// MOV r0, 1 --out(-5)--> LD r0
std::optional<int> getSignedOperandLatency(const InstrItineraryData *ItinData,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx,
SDep::Kind Kind) const;
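/// A usage sketch, taking the (possibly negative) latency with an assumed
/// fallback of one cycle for undescribed instructions:
/// \code
///   int Lat = TII->getSignedOperandLatency(ItinData, DefMI, DefIdx,
///                                          UseMI, UseIdx, SDep::Anti)
///                 .value_or(1);
/// \endcode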
/// Returns the number of cycles that are saved if there is a bypass (pipeline
/// forwarding) between \p DefMI and \p UseMI for the given operands.
/// This returns 0 if no bypass is taken.
virtual unsigned getNumBypassedCycles(const InstrItineraryData *ItinData,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const;
/// Returns the latency to be observed to preserve the ordering of aliasing
/// memory operations.
/// E.g. in AIE2 VST.SRS to VLD has a memory latency of 3 cycles.
std::optional<int> getMemoryLatency(unsigned SrcSchedClass,
unsigned DstSchedClass) const;
/// Returns the worst-case latency to be observed to preserve the
/// ordering of aliasing memory operations. We don't know the destination
/// of the edge
int getConservativeMemoryLatency(unsigned SrcSchedClass) const;
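/// A usage sketch for the two queries above, falling back to the
/// conservative bound when the destination of the edge is unknown
/// (\c DstKnown and \c DstSC are assumed to come from the caller):
/// \code
///   unsigned SrcSC = DefMI.getDesc().getSchedClass();
///   int Lat = DstKnown
///                 ? TII->getMemoryLatency(SrcSC, DstSC).value_or(1)
///                 : TII->getConservativeMemoryLatency(SrcSC);
/// \endcode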
/// Returns cycle for the first memory operation of an instruction.
/// This is usually the same as \p getLastMemoryCycle except for instructions
/// that touch memory multiple times like AIE's read-modify-write part-word
/// stores.
/// E.g. in AIE2 VST.SRS has a first cycle of 7, ST.s8 a first cycle of 5.
virtual std::optional<int> getFirstMemoryCycle(unsigned SchedClass) const;
/// Returns cycle for the last memory operation of an instruction.
/// This is usually the same as \p getFirstMemoryCycle except for instructions
/// that touch memory multiple times like AIE's read-modify-write part-word
/// stores.
/// E.g. in AIE2 VST.SRS has a last cycle of 7, ST.s8 a last cycle of 11.
virtual std::optional<int> getLastMemoryCycle(unsigned SchedClass) const;
/// Return the minimum of FirstMemoryCycle over all sched classes
virtual int getMinFirstMemoryCycle() const;
/// Return the maximum of FirstMemoryCycle over all sched classes
virtual int getMaxFirstMemoryCycle() const;
/// Return the minimum of LastMemoryCycle over all sched classes
virtual int getMinLastMemoryCycle() const;
/// Return the maximum of LastMemoryCycle over all sched classes
virtual int getMaxLastMemoryCycle() const;
/// Return cycles for memory operations of an instruction.
virtual SmallVector<int, 2> getMemoryCycles(unsigned SchedClass) const;
/// Return cycles for core to stall after lock instruction.
virtual int getCoreStallCycleAfterLock() const;
/// Return cycles for core to resume after lock instruction.
virtual int getCoreResumeCycleAfterLock() const;
/// Return the schedclass for the given instruction descriptor based on
/// operand regclass.
virtual unsigned
getSchedClass(const MCInstrDesc &Desc,
iterator_range<const MachineOperand *> Operands,
const MachineRegisterInfo &MRI) const;
const AIEBaseMCFormats *getFormatInterface() const { return FormatInterface; }
/// Verifies whether Ty is legal as an input to G_AIE_PAD_VECTOR_UNDEF or an
/// output of G_AIE_UNPAD_VECTOR
virtual bool isLegalTypeToPad(const LLT &Ty,
StringRef *ErrInfo = nullptr) const;
/// Verifies whether Ty is legal as an input to G_AIE_UNPAD_VECTOR or an
/// output of G_AIE_PAD_VECTOR_UNDEF
virtual bool isLegalTypeToUnpad(const LLT &Ty,
StringRef *ErrInfo = nullptr) const;
virtual bool verifyGenericInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const;
virtual bool verifyMemOperand(const MachineInstr &MI,
StringRef &ErrInfo) const;
bool verifyTiedRegisters(const MachineInstr &MI, StringRef &ErrInfo) const;
static bool verifySameLaneTypes(const MachineInstr &MI, StringRef &ErrInfo);
bool verifyImplicitOpsOrder(const MachineInstr &MI, StringRef &ErrInfo) const;
bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
/// Returns whether the function that contains \p MI has been
/// legalized
static bool isLegalized(const MachineInstr &MI) {
return MI.getParent()->getParent()->getProperties().hasProperty(
MachineFunctionProperties::Property::Legalized);
}
bool canHoistCheapInst(const MachineInstr &MI) const override;
static bool regClassMatches(const TargetRegisterClass &TRC,
const TargetRegisterClass *RC, unsigned Reg);
struct VConcatOpInfo {
// First input operand index.
unsigned FirstOperand;
// Number of non-register operands.
unsigned NumOfNonRegOperands;
};
/// Return operand information related to the vector concat intrinsic.
virtual std::optional<const VConcatOpInfo>
getVConcatOpInfo(const MachineInstr &MI) const;
struct VUpdateOpInfo {
// Vector to update operand index.
unsigned Src;
// Subvector to insert.
unsigned SrcSubVec;
// Position to insert operand index.
unsigned SubVectorIndex;
};
/// Return operand information related to the vector update intrinsic.
virtual std::optional<const VUpdateOpInfo>
getVUpdateOpInfo(const MachineInstr &MI) const {
llvm_unreachable("Target didn't implement getVUpdateOpInfo!");
}
struct VExtractOpInfo {
// Vector to extract from, operand index.
unsigned Src;
// Position to extract.
unsigned SubVectorIndex;
};
/// Return operand information related to the vector extract intrinsic.
virtual std::optional<const VExtractOpInfo>
getVExtractOpInfo(const MachineInstr &MI) const {
llvm_unreachable("Target didn't implement getVExtractOpInfo!");
}
/// Return the maximum size for memory operations on this target.
virtual unsigned getMaxLoadStoreSize() const {
llvm_unreachable("Target didn't implement getMaxLoadStoreSize!");
}
/// Return true if this instruction can be combined with a memory operation.
virtual bool canCombineWithLoadStore(const MachineInstr &MI) const {
llvm_unreachable("Target didn't implement canCombineWithLoadStore!");
}
/// Return true if the type can be split to fit the target's restrictions.
/// For example, by splitting those types in advance, it is possible to
/// reach more combiners during selection.
virtual bool isProfitableToSplitType(const LLT Ty) const {
llvm_unreachable("Target didn't implement isProfitableToSplitType!");
}
/// Get the native size of the source vector for basic vector operations like
/// `G_AIE_[ZS]EXT_EXTRACT_VECTOR_ELT`, `G_AIE_EXTRACT_SUBVECTOR`,
/// `G_AIE_VSEL` and `G_AIE_VSHIFT_RIGHT`.
virtual unsigned getBasicVectorBitSize() const {
llvm_unreachable("Target didn't implement getBasicVectorBitSize!");
}
/// Get size of general purpose registers (GPR)
virtual unsigned getScalarRegSize() const {
llvm_unreachable("Target didn't implement getScalarRegSize!");
}
/// Get size of basic vector registers
virtual unsigned getBasicVecRegSize() const {
llvm_unreachable("Target didn't implement getVecRegSize!");
}
/// Return the maximum supported vector size for this target.
virtual unsigned getMaxVectorBitSize() const {
llvm_unreachable("Target didn't implement getMaxVectorBitSize!");
}
/// Return the maximum vector size the target supports for a combined
/// load-store increment.
virtual unsigned getMaxSupportedLdStIncSize() const {
llvm_unreachable("Target didn't implement getMaxSupportedLdStIncSize!");
}
/// Abstract operations to help the decoding of complex operations.
struct AbstractOp {
enum class OperationType : unsigned {
VECTOR_ADD,
VECTOR_BROADCAST,
VECTOR_SELECT,
VECTOR_XWAY_LOAD
};
OperationType Type;
SmallVector<Register, 2> VectorSrcRegs;
SmallVector<Register, 2> ScalarSrcRegs;
};
/// Retrieve an abstract representation of an instruction.
virtual std::optional<const AbstractOp>
parseAbstractOp(const MachineInstr &MI) const {
return std::nullopt;
}
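/// E.g. a combine could detect a broadcast without knowing the concrete
/// target opcode (sketch):
/// \code
///   if (std::optional<const AbstractOp> Op = TII->parseAbstractOp(MI)) {
///     if (Op->Type == AbstractOp::OperationType::VECTOR_BROADCAST) {
///       Register SplatSrc = Op->ScalarSrcRegs[0];
///       // ... combine users of MI against SplatSrc ...
///     }
///   }
/// \endcode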
public:
/// Expand a spill pseudo-instruction into actual target instructions. This
/// will essentially split the register being handled into its sub-registers,
/// until there is an actual instruction that can handle them.
/// In case sub-registers don't have a nice offset, they can be aligned using
/// \p SubRegOffsetAlign.
/// In case the offset value is above the encodable limit, provide a pointer
/// register that points to the stack pointer, together with the actual
/// offset value.
void expandSpillPseudo(MachineInstr &MI, const TargetRegisterInfo &TRI,
Align SubRegOffsetAlign = Align(1),
Register SPReg = MCRegister::NoRegister,
std::optional<int64_t> OffsetVal = std::nullopt) const;
protected:
struct AIEPseudoExpandInfo {
/// OpCode to expand a PseudoInstruction to. This can be another Pseudo.
unsigned ExpandedOpCode;
/// Index of the sub-register to use when splitting the register used
/// in the initial instruction.
/// This can be NoSubRegister, but then \ref MemSize must be set.
unsigned SubRegIndex;
/// Explicit size (in bytes) that this expanded spill instruction will use
/// in memory. This is useful when the expansion can't be characterized with
/// sub-registers.
int MemSize = 0;
};
struct AIERegOffsetSpillInstrInfo {
/// Opcode for spill using register offset.
unsigned SpillOpCode;
/// Opcode for adjusting the offset register.
unsigned AdjustOffsetOpcode;
/// Target register class for offset register.
const TargetRegisterClass *OffsetRC;
};
/// Return information on how to expand a spill (load/store) pseudo
/// instruction. This returns an empty vector if the instruction does not
/// need expanding. Otherwise, the size of the vector will match the number
/// of instructions that \p MI needs to be expanded to.
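/// For example, an override could split a 64-bit spill pseudo into two
/// 32-bit stores through sub-registers (opcodes and sub-register indices
/// are illustrative):
/// \code
///   SmallVector<AIEPseudoExpandInfo, 4>
///   getSpillPseudoExpandInfo(const MachineInstr &MI) const override {
///     if (MI.getOpcode() == MyTgt::PseudoST64)
///       return {{MyTgt::ST32, MyTgt::sub_lo},
///               {MyTgt::ST32, MyTgt::sub_hi}};
///     return {};
///   }
/// \endcode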
virtual SmallVector<AIEPseudoExpandInfo, 4>
getSpillPseudoExpandInfo(const MachineInstr &MI) const {
return {};
}
/// Retrieve information about a register-offset instruction derived from an
/// immediate-offset instruction. This is useful when an immediate offset
/// cannot be encoded directly in the instruction and a register must be
/// used to represent the offset value instead.
virtual AIERegOffsetSpillInstrInfo
getRegOffsetSpillInstrInfoFromImmOffset(const unsigned Opcode) const {
return {};
}
// Copy SrcReg to DstReg through their sub-registers.
void copyThroughSubRegs(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
MCRegister DstReg, MCRegister SrcReg,
bool KillSrc) const;
#if 0
// TODO. I guess this should wait for Davy's PR to land
// Get me a nop to fill Slot
// TODO: Strong type for Slot (MCSlotKind)
virtual unsigned getNopOpcode(unsigned Slot) const = 0;
#endif
/// If the specific machine instruction is an instruction that moves/copies
/// value from one register to another register return destination and source
/// registers as machine operands.
std::optional<DestSourcePair>
isCopyInstrImpl(const MachineInstr &MI) const override;
/// Analyze loop L, which must be a single-basic-block loop, and if the
/// conditions can be understood enough produce a PipelinerLoopInfo object.
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override;
// Provide a ResourceCycle interface on top of the hazard recognizer
ResourceCycle *
CreateTargetScheduleState(const TargetSubtargetInfo &) const override;
const AIEBaseMCFormats *FormatInterface = nullptr;
// MIR formatter overrides for PseudoSourceValues
const MIRFormatter *getMIRFormatter() const override;
mutable std::unique_ptr<AIEMIRFormatter> Formatter;
};
} // namespace llvm
#endif // LLVM_LIB_TARGET_AIE_AIEBASEINSTRINFO_H