[AArch64][SME2] Add register allocation hints for ZPRMulReg #1

Open
wants to merge 1 commit into base: sme2-form-strided-pseudo
75 changes: 75 additions & 0 deletions llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1107,6 +1107,81 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}

// FORM_STRIDED_TUPLE nodes are created to improve register allocation where
// a consecutive multi-vector tuple is constructed from the same indices of
// multiple strided loads. This may still result in unnecessary copies between
// the loads and the tuple. Here we try to return a hint to assign the
// contiguous ZPRMulReg starting at the same register as the first operand of
// the pseudo, which should be a subregister of the first strided load.
//
// For example, if the first strided load has been assigned $z16_z20_z24_z28
// and the operands of the pseudo are each accessing subregister zsub2, we
// should look through Order to find a contiguous register which
// begins with $z24 (i.e. $z24_z25_z26_z27).
//
bool AArch64RegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const AArch64Subtarget &STI = MF.getSubtarget<AArch64Subtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  bool DefaultHints =
      TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);

  unsigned RegID = MRI.getRegClass(VirtReg)->getID();
  if (RegID != AArch64::ZPR2Mul2RegClassID &&
      RegID != AArch64::ZPR4Mul4RegClassID)
    return DefaultHints;

  for (MachineInstr &MI : MRI.def_instructions(VirtReg)) {
    if (MI.getOpcode() != AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO &&
        MI.getOpcode() != AArch64::FORM_STRIDED_TUPLE_X4_PSEUDO)
      continue;

    // Look up the physical register mapped to the first load of the pseudo.
    Register FirstLoadVirtReg = MI.getOperand(1).getReg();
    if (!VRM->hasPhys(FirstLoadVirtReg))
      continue;

    unsigned SubRegIdx = 0;
    MCRegister FirstLoadPhysReg = VRM->getPhys(FirstLoadVirtReg);

    // The subreg number is used to access the correct unit of the
    // strided register found in the map above.
    switch (MI.getOperand(1).getSubReg()) {
    case AArch64::zsub0:
      break;
    case AArch64::zsub1:
      SubRegIdx = 1;
      break;
    case AArch64::zsub2:
      SubRegIdx = 2;
      break;
    case AArch64::zsub3:
      SubRegIdx = 3;
      break;
    default:
      continue;
    }

    SmallVector<Register, 4> RegUnits;
    for (MCRegUnit Unit : TRI->regunits(FirstLoadPhysReg))
      RegUnits.push_back(Unit);

    // Find the contiguous ZPRMul register which starts with the
    // same register unit as the strided register and add to Hints.
    Register StartReg = RegUnits[SubRegIdx];
    for (unsigned I = 0; I < Order.size(); ++I) {
      Register Reg = *TRI->regunits(Order[I]).begin();
      if (Reg == StartReg)
        Hints.push_back(Order[I]);
    }
  }

  return DefaultHints;
}

unsigned AArch64RegisterInfo::getLocalAddressRegister(
    const MachineFunction &MF) const {
  const auto &MFI = MF.getFrameInfo();
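
To make the hinting step above concrete, here is a small standalone sketch of the matching logic. It is not part of the patch and does not use LLVM's APIs: register units are modelled as plain integers, each candidate tuple in Order is identified by the number of its first register, and the register numbers mirror the $z16_z20_z24_z28 / zsub2 example from the comment.

// Toy model of the hint computation: find the contiguous tuple in the
// allocation order whose first register matches the strided element that
// the FORM_STRIDED_TUPLE operand reads.
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical assignment of the first strided load: Z16_Z20_Z24_Z28,
  // modelled as the register numbers of its four elements.
  std::vector<int> FirstLoadRegs = {16, 20, 24, 28};
  unsigned SubRegIdx = 2; // the pseudo's operands use zsub2 -> element Z24

  // Hypothetical allocation order of ZPR4Mul4 tuples, each identified by
  // the number of its first register (Z0_Z1_Z2_Z3, Z4_..., ..., Z28_...).
  std::vector<int> OrderFirstRegs = {0, 4, 8, 12, 16, 20, 24, 28};

  int StartReg = FirstLoadRegs[SubRegIdx]; // 24
  for (int First : OrderFirstRegs)
    if (First == StartReg)
      std::printf("hint: z%d_z%d_z%d_z%d\n", First, First + 1, First + 2,
                  First + 3);
  return 0;
}

Running this prints hint: z24_z25_z26_z27, which is exactly the tuple the comment says should be preferred.
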
5 changes: 5 additions & 0 deletions llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -134,6 +134,11 @@ class AArch64RegisterInfo final : public AArch64GenRegisterInfo {
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const override;

  bool getRegAllocationHints(Register VirtReg, ArrayRef<MCPhysReg> Order,
                             SmallVectorImpl<MCPhysReg> &Hints,
                             const MachineFunction &MF, const VirtRegMap *VRM,
                             const LiveRegMatrix *Matrix) const override;

unsigned getLocalAddressRegister(const MachineFunction &MF) const;
bool regNeedsCFI(unsigned Reg, unsigned &RegToUseForCFI) const;

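
A side note on why the override returns DefaultHints rather than true: as I understand the TargetRegisterInfo contract, the returned hints are only preferences unless the hook returns true, so the allocator can still fall back to the normal allocation order when the hinted tuple is unavailable. A minimal toy model of that behaviour follows (plain C++, not LLVM code; the register numbers are hypothetical).

// Toy model of how an allocator consumes hints when the hook returns
// false: hinted registers are tried first, but the rest of the
// allocation order remains available if they are taken.
#include <cstdio>
#include <vector>

int pickReg(const std::vector<int> &Hints, const std::vector<int> &Order,
            const std::vector<int> &Taken) {
  auto isTaken = [&](int R) {
    for (int T : Taken)
      if (T == R)
        return true;
    return false;
  };
  for (int R : Hints)
    if (!isTaken(R))
      return R; // preference honoured
  for (int R : Order)
    if (!isTaken(R))
      return R; // fall back to the normal order
  return -1;
}

int main() {
  std::vector<int> Order = {0, 4, 8, 12, 16, 20, 24, 28};
  std::vector<int> Hints = {24}; // e.g. the tuple starting at z24
  std::printf("%d\n", pickReg(Hints, Order, {}));   // prints 24
  std::printf("%d\n", pickReg(Hints, Order, {24})); // prints 0
  return 0;
}
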
128 changes: 24 additions & 104 deletions llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -590,12 +590,8 @@ define void @udot_form_2x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: ld1b { z16.b, z24.b }, pn8/z, [x0]
; CHECK-NEXT: ld1b { z17.b, z25.b }, pn8/z, [x0, x1]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: udot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: udot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: udot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0]
; CHECK-NEXT: udot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -622,26 +618,10 @@ define void @udot_form_4x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: add x10, x9, x1
; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: mov z2.d, z18.d
; CHECK-NEXT: mov z3.d, z19.d
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z20.d
; CHECK-NEXT: mov z1.d, z21.d
; CHECK-NEXT: mov z2.d, z22.d
; CHECK-NEXT: mov z3.d, z23.d
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: mov z2.d, z26.d
; CHECK-NEXT: mov z3.d, z27.d
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z28.d
; CHECK-NEXT: mov z1.d, z29.d
; CHECK-NEXT: mov z2.d, z30.d
; CHECK-NEXT: mov z3.d, z31.d
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -752,12 +732,8 @@ define void @usdot_form_2x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: ld1b { z16.b, z24.b }, pn8/z, [x0]
; CHECK-NEXT: ld1b { z17.b, z25.b }, pn8/z, [x0, x1]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: usdot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: usdot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: usdot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0]
; CHECK-NEXT: usdot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -784,26 +760,10 @@ define void @usdot_form_4x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: add x10, x9, x1
; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: mov z2.d, z18.d
; CHECK-NEXT: mov z3.d, z19.d
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z20.d
; CHECK-NEXT: mov z1.d, z21.d
; CHECK-NEXT: mov z2.d, z22.d
; CHECK-NEXT: mov z3.d, z23.d
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: mov z2.d, z26.d
; CHECK-NEXT: mov z3.d, z27.d
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z28.d
; CHECK-NEXT: mov z1.d, z29.d
; CHECK-NEXT: mov z2.d, z30.d
; CHECK-NEXT: mov z3.d, z31.d
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -916,12 +876,8 @@ define void @sdot_form_2x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: ld1b { z16.b, z24.b }, pn8/z, [x0]
; CHECK-NEXT: ld1b { z17.b, z25.b }, pn8/z, [x0, x1]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0]
; CHECK-NEXT: sdot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -948,26 +904,10 @@ define void @sdot_form_4x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: add x10, x9, x1
; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: mov z2.d, z18.d
; CHECK-NEXT: mov z3.d, z19.d
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z20.d
; CHECK-NEXT: mov z1.d, z21.d
; CHECK-NEXT: mov z2.d, z22.d
; CHECK-NEXT: mov z3.d, z23.d
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: mov z2.d, z26.d
; CHECK-NEXT: mov z3.d, z27.d
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z28.d
; CHECK-NEXT: mov z1.d, z29.d
; CHECK-NEXT: mov z2.d, z30.d
; CHECK-NEXT: mov z3.d, z31.d
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -1080,12 +1020,8 @@ define void @sudot_form_2x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: ld1b { z16.b, z24.b }, pn8/z, [x0]
; CHECK-NEXT: ld1b { z17.b, z25.b }, pn8/z, [x0, x1]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z0.b, z1.b }, z0.b[0]
; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z16.b, z17.b }, z0.b[0]
; CHECK-NEXT: sudot za.s[w8, 0, vgx2], { z24.b, z25.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
@@ -1112,26 +1048,10 @@ define void @sudot_form_4x_tuple(ptr %ptr, i64 %stride) #0 {
; CHECK-NEXT: add x10, x9, x1
; CHECK-NEXT: ld1b { z18.b, z22.b, z26.b, z30.b }, pn8/z, [x0, x9]
; CHECK-NEXT: ld1b { z19.b, z23.b, z27.b, z31.b }, pn8/z, [x0, x10]
; CHECK-NEXT: mov z0.d, z16.d
; CHECK-NEXT: mov z1.d, z17.d
; CHECK-NEXT: mov z2.d, z18.d
; CHECK-NEXT: mov z3.d, z19.d
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z20.d
; CHECK-NEXT: mov z1.d, z21.d
; CHECK-NEXT: mov z2.d, z22.d
; CHECK-NEXT: mov z3.d, z23.d
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z24.d
; CHECK-NEXT: mov z1.d, z25.d
; CHECK-NEXT: mov z2.d, z26.d
; CHECK-NEXT: mov z3.d, z27.d
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: mov z0.d, z28.d
; CHECK-NEXT: mov z1.d, z29.d
; CHECK-NEXT: mov z2.d, z30.d
; CHECK-NEXT: mov z3.d, z31.d
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z0.b - z3.b }, z0.b[0]
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z16.b - z19.b }, z0.b[0]
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z20.b - z23.b }, z0.b[0]
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z24.b - z27.b }, z0.b[0]
; CHECK-NEXT: sudot za.s[w8, 0, vgx4], { z28.b - z31.b }, z0.b[0]
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()