//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the callers
  // target-features.
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty,
                                                  TTI::TargetCostKind CostKind,
                                                  Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
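    // (Reporting a non-free cost for operand 0 below is what encourages
    //  ConstantHoisting to hoist the base address rather than rematerialise
    //  it at every GEP; all other GEP operands are reported as free.)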
if (Idx == 0) return 2 * TTI::TCC_Basic; return TTI::TCC_Free; case Instruction::Store: ImmIdx = 0; break; case Instruction::Add: case Instruction::Sub: case Instruction::Mul: case Instruction::UDiv: case Instruction::SDiv: case Instruction::URem: case Instruction::SRem: case Instruction::And: case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: ImmIdx = 1; break; // Always return TCC_Free for the shift value of a shift instruction. case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: if (Idx == 1) return TTI::TCC_Free; break; case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: case Instruction::IntToPtr: case Instruction::PtrToInt: case Instruction::BitCast: case Instruction::PHI: case Instruction::Call: case Instruction::Select: case Instruction::Ret: case Instruction::Load: break; } if (Idx == ImmIdx) { int NumConstants = (BitSize + 63) / 64; InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); return (Cost <= NumConstants * TTI::TCC_Basic) ? static_cast(TTI::TCC_Free) : Cost; } return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); } InstructionCost AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind) { assert(Ty->isIntegerTy()); unsigned BitSize = Ty->getPrimitiveSizeInBits(); // There is no cost model for constants with a bit size of 0. Return TCC_Free // here, so that constant hoisting will ignore this constant. if (BitSize == 0) return TTI::TCC_Free; // Most (all?) AArch64 intrinsics do not support folding immediates into the // selected instruction, so we compute the materialization cost for the // immediate directly. if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv) return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); switch (IID) { default: return TTI::TCC_Free; case Intrinsic::sadd_with_overflow: case Intrinsic::uadd_with_overflow: case Intrinsic::ssub_with_overflow: case Intrinsic::usub_with_overflow: case Intrinsic::smul_with_overflow: case Intrinsic::umul_with_overflow: if (Idx == 1) { int NumConstants = (BitSize + 63) / 64; InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); return (Cost <= NumConstants * TTI::TCC_Basic) ? static_cast(TTI::TCC_Free) : Cost; } break; case Intrinsic::experimental_stackmap: if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) return TTI::TCC_Free; break; case Intrinsic::experimental_patchpoint_void: case Intrinsic::experimental_patchpoint_i64: if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) return TTI::TCC_Free; break; case Intrinsic::experimental_gc_statepoint: if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) return TTI::TCC_Free; break; } return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind); } TargetTransformInfo::PopcntSupportKind AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) { assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2"); if (TyWidth == 32 || TyWidth == 64) return TTI::PSK_FastHardware; // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount. 
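  // (For the widths reported as fast above, LowerCTPOP roughly emits a fmov
  //  into a vector register, a byte-wise CNT and a horizontal add, so scalar
  //  popcounts are cheap even without a dedicated scalar instruction.)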
return TTI::PSK_Software; } InstructionCost AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) { auto *RetTy = ICA.getReturnType(); switch (ICA.getID()) { case Intrinsic::umin: case Intrinsic::umax: { auto LT = TLI->getTypeLegalizationCost(DL, RetTy); // umin(x,y) -> sub(x,usubsat(x,y)) // umax(x,y) -> add(x,usubsat(y,x)) if (LT.second == MVT::v2i64) return LT.first * 2; LLVM_FALLTHROUGH; } case Intrinsic::smin: case Intrinsic::smax: { static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32}; auto LT = TLI->getTypeLegalizationCost(DL, RetTy); if (any_of(ValidMinMaxTys, [<](MVT M) { return M == LT.second; })) return LT.first; break; } case Intrinsic::sadd_sat: case Intrinsic::ssub_sat: case Intrinsic::uadd_sat: case Intrinsic::usub_sat: { static const auto ValidSatTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32, MVT::v2i64}; auto LT = TLI->getTypeLegalizationCost(DL, RetTy); // This is a base cost of 1 for the vadd, plus 3 extract shifts if we // need to extend the type, as it uses shr(qadd(shl, shl)). unsigned Instrs = LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4; if (any_of(ValidSatTys, [<](MVT M) { return M == LT.second; })) return LT.first * Instrs; break; } case Intrinsic::abs: { static const auto ValidAbsTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32, MVT::v2i64}; auto LT = TLI->getTypeLegalizationCost(DL, RetTy); if (any_of(ValidAbsTys, [<](MVT M) { return M == LT.second; })) return LT.first; break; } case Intrinsic::experimental_stepvector: { InstructionCost Cost = 1; // Cost of the `index' instruction auto LT = TLI->getTypeLegalizationCost(DL, RetTy); // Legalisation of illegal vectors involves an `index' instruction plus // (LT.first - 1) vector adds. 
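    // (For example, a <vscale x 4 x i64> stepvector legalises into two
    //  <vscale x 2 x i64> parts, i.e. LT.first == 2: one `index' instruction
    //  plus one vector add to form the upper part.)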
if (LT.first > 1) { Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext()); InstructionCost AddCost = getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind); Cost += AddCost * (LT.first - 1); } return Cost; } case Intrinsic::bitreverse: { static const CostTblEntry BitreverseTbl[] = { {Intrinsic::bitreverse, MVT::i32, 1}, {Intrinsic::bitreverse, MVT::i64, 1}, {Intrinsic::bitreverse, MVT::v8i8, 1}, {Intrinsic::bitreverse, MVT::v16i8, 1}, {Intrinsic::bitreverse, MVT::v4i16, 2}, {Intrinsic::bitreverse, MVT::v8i16, 2}, {Intrinsic::bitreverse, MVT::v2i32, 2}, {Intrinsic::bitreverse, MVT::v4i32, 2}, {Intrinsic::bitreverse, MVT::v1i64, 2}, {Intrinsic::bitreverse, MVT::v2i64, 2}, }; const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy); const auto *Entry = CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second); // Cost Model is using the legal type(i32) that i8 and i16 will be converted // to +1 so that we match the actual lowering cost if (TLI->getValueType(DL, RetTy, true) == MVT::i8 || TLI->getValueType(DL, RetTy, true) == MVT::i16) return LegalisationCost.first * Entry->Cost + 1; if (Entry) return LegalisationCost.first * Entry->Cost; break; } case Intrinsic::ctpop: { static const CostTblEntry CtpopCostTbl[] = { {ISD::CTPOP, MVT::v2i64, 4}, {ISD::CTPOP, MVT::v4i32, 3}, {ISD::CTPOP, MVT::v8i16, 2}, {ISD::CTPOP, MVT::v16i8, 1}, {ISD::CTPOP, MVT::i64, 4}, {ISD::CTPOP, MVT::v2i32, 3}, {ISD::CTPOP, MVT::v4i16, 2}, {ISD::CTPOP, MVT::v8i8, 1}, {ISD::CTPOP, MVT::i32, 5}, }; auto LT = TLI->getTypeLegalizationCost(DL, RetTy); MVT MTy = LT.second; if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) { // Extra cost of +1 when illegal vector types are legalized by promoting // the integer type. int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() != RetTy->getScalarSizeInBits() ? 1 : 0; return LT.first * Entry->Cost + ExtraCost; } break; } default: break; } return BaseT::getIntrinsicInstrCost(ICA, CostKind); } /// The function will remove redundant reinterprets casting in the presence /// of the control flow static Optional processPhiNode(InstCombiner &IC, IntrinsicInst &II) { SmallVector Worklist; auto RequiredType = II.getType(); auto *PN = dyn_cast(II.getArgOperand(0)); assert(PN && "Expected Phi Node!"); // Don't create a new Phi unless we can remove the old one. if (!PN->hasOneUse()) return None; for (Value *IncValPhi : PN->incoming_values()) { auto *Reinterpret = dyn_cast(IncValPhi); if (!Reinterpret || Reinterpret->getIntrinsicID() != Intrinsic::aarch64_sve_convert_to_svbool || RequiredType != Reinterpret->getArgOperand(0)->getType()) return None; } // Create the new Phi LLVMContext &Ctx = PN->getContext(); IRBuilder<> Builder(Ctx); Builder.SetInsertPoint(PN); PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues()); Worklist.push_back(PN); for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) { auto *Reinterpret = cast(PN->getIncomingValue(I)); NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I)); Worklist.push_back(Reinterpret); } // Cleanup Phi Node and reinterprets return IC.replaceInstUsesWith(II, NPN); } static Optional instCombineConvertFromSVBool(InstCombiner &IC, IntrinsicInst &II) { // If the reinterpret instruction operand is a PHI Node if (isa(II.getArgOperand(0))) return processPhiNode(IC, II); SmallVector CandidatesForRemoval; Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr; const auto *IVTy = cast(II.getType()); // Walk the chain of conversions. 
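  // For example (illustrative), in
  //   %b = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool(%a)
  //   %c = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool(%b)
  // with %a of type <vscale x 4 x i1>, the walk below finds %a as a
  // replacement for %c, because no lanes are zeroed on the way through the
  // wider svbool type.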
while (Cursor) { // If the type of the cursor has fewer lanes than the final result, zeroing // must take place, which breaks the equivalence chain. const auto *CursorVTy = cast(Cursor->getType()); if (CursorVTy->getElementCount().getKnownMinValue() < IVTy->getElementCount().getKnownMinValue()) break; // If the cursor has the same type as I, it is a viable replacement. if (Cursor->getType() == IVTy) EarliestReplacement = Cursor; auto *IntrinsicCursor = dyn_cast(Cursor); // If this is not an SVE conversion intrinsic, this is the end of the chain. if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool || IntrinsicCursor->getIntrinsicID() == Intrinsic::aarch64_sve_convert_from_svbool)) break; CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor); Cursor = IntrinsicCursor->getOperand(0); } // If no viable replacement in the conversion chain was found, there is // nothing to do. if (!EarliestReplacement) return None; return IC.replaceInstUsesWith(II, EarliestReplacement); } static Optional instCombineSVEDup(InstCombiner &IC, IntrinsicInst &II) { IntrinsicInst *Pg = dyn_cast(II.getArgOperand(1)); if (!Pg) return None; if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) return None; const auto PTruePattern = cast(Pg->getOperand(0))->getZExtValue(); if (PTruePattern != AArch64SVEPredPattern::vl1) return None; // The intrinsic is inserting into lane zero so use an insert instead. auto *IdxTy = Type::getInt64Ty(II.getContext()); auto *Insert = InsertElementInst::Create( II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0)); Insert->insertBefore(&II); Insert->takeName(&II); return IC.replaceInstUsesWith(II, Insert); } static Optional instCombineSVECmpNE(InstCombiner &IC, IntrinsicInst &II) { LLVMContext &Ctx = II.getContext(); IRBuilder<> Builder(Ctx); Builder.SetInsertPoint(&II); // Check that the predicate is all active auto *Pg = dyn_cast(II.getArgOperand(0)); if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) return None; const auto PTruePattern = cast(Pg->getOperand(0))->getZExtValue(); if (PTruePattern != AArch64SVEPredPattern::all) return None; // Check that we have a compare of zero.. 
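  // The overall shape matched here is roughly:
  //   sve.cmpne(ptrue(all),
  //             dupq_lane(vector.insert(undef, <fixed constant vector>, 0), 0),
  //             dup.x(0))
  // i.e. a compare that only materialises a constant predicate; it is rebuilt
  // below from a ptrue of the widest predicate element size that fits.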
auto *DupX = dyn_cast(II.getArgOperand(2)); if (!DupX || DupX->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x) return None; auto *DupXArg = dyn_cast(DupX->getArgOperand(0)); if (!DupXArg || !DupXArg->isZero()) return None; // ..against a dupq auto *DupQLane = dyn_cast(II.getArgOperand(1)); if (!DupQLane || DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane) return None; // Where the dupq is a lane 0 replicate of a vector insert if (!cast(DupQLane->getArgOperand(1))->isZero()) return None; auto *VecIns = dyn_cast(DupQLane->getArgOperand(0)); if (!VecIns || VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert) return None; // Where the vector insert is a fixed constant vector insert into undef at // index zero if (!isa(VecIns->getArgOperand(0))) return None; if (!cast(VecIns->getArgOperand(2))->isZero()) return None; auto *ConstVec = dyn_cast(VecIns->getArgOperand(1)); if (!ConstVec) return None; auto *VecTy = dyn_cast(ConstVec->getType()); auto *OutTy = dyn_cast(II.getType()); if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements()) return None; unsigned NumElts = VecTy->getNumElements(); unsigned PredicateBits = 0; // Expand intrinsic operands to a 16-bit byte level predicate for (unsigned I = 0; I < NumElts; ++I) { auto *Arg = dyn_cast(ConstVec->getAggregateElement(I)); if (!Arg) return None; if (!Arg->isZero()) PredicateBits |= 1 << (I * (16 / NumElts)); } // If all bits are zero bail early with an empty predicate if (PredicateBits == 0) { auto *PFalse = Constant::getNullValue(II.getType()); PFalse->takeName(&II); return IC.replaceInstUsesWith(II, PFalse); } // Calculate largest predicate type used (where byte predicate is largest) unsigned Mask = 8; for (unsigned I = 0; I < 16; ++I) if ((PredicateBits & (1 << I)) != 0) Mask |= (I % 8); unsigned PredSize = Mask & -Mask; auto *PredType = ScalableVectorType::get( Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8)); // Ensure all relevant bits are set for (unsigned I = 0; I < 16; I += PredSize) if ((PredicateBits & (1 << I)) == 0) return None; auto *PTruePat = ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all); auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {PredType}, {PTruePat}); auto *ConvertToSVBool = Builder.CreateIntrinsic( Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue}); auto *ConvertFromSVBool = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, {II.getType()}, {ConvertToSVBool}); ConvertFromSVBool->takeName(&II); return IC.replaceInstUsesWith(II, ConvertFromSVBool); } static Optional instCombineSVELast(InstCombiner &IC, IntrinsicInst &II) { Value *Pg = II.getArgOperand(0); Value *Vec = II.getArgOperand(1); bool IsAfter = II.getIntrinsicID() == Intrinsic::aarch64_sve_lasta; // lastX(splat(X)) --> X if (auto *SplatVal = getSplatValue(Vec)) return IC.replaceInstUsesWith(II, SplatVal); auto *C = dyn_cast(Pg); if (IsAfter && C && C->isNullValue()) { // The intrinsic is extracting lane 0 so use an extract instead. auto *IdxTy = Type::getInt64Ty(II.getContext()); auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0)); Extract->insertBefore(&II); Extract->takeName(&II); return IC.replaceInstUsesWith(II, Extract); } auto *IntrPG = dyn_cast(Pg); if (!IntrPG) return None; if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue) return None; const auto PTruePattern = cast(IntrPG->getOperand(0))->getZExtValue(); // Can the intrinsic's predicate be converted to a known constant index? 
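  // For example, lastb(ptrue(vl4), %v) reads lane 3 and lasta(ptrue(vl4), %v)
  // reads lane 4, so both can become a plain extractelement when the index is
  // known to be in range.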
unsigned Idx; switch (PTruePattern) { default: return None; case AArch64SVEPredPattern::vl1: Idx = 0; break; case AArch64SVEPredPattern::vl2: Idx = 1; break; case AArch64SVEPredPattern::vl3: Idx = 2; break; case AArch64SVEPredPattern::vl4: Idx = 3; break; case AArch64SVEPredPattern::vl5: Idx = 4; break; case AArch64SVEPredPattern::vl6: Idx = 5; break; case AArch64SVEPredPattern::vl7: Idx = 6; break; case AArch64SVEPredPattern::vl8: Idx = 7; break; case AArch64SVEPredPattern::vl16: Idx = 15; break; } // Increment the index if extracting the element after the last active // predicate element. if (IsAfter) ++Idx; // Ignore extracts whose index is larger than the known minimum vector // length. NOTE: This is an artificial constraint where we prefer to // maintain what the user asked for until an alternative is proven faster. auto *PgVTy = cast(Pg->getType()); if (Idx >= PgVTy->getMinNumElements()) return None; // The intrinsic is extracting a fixed lane so use an extract instead. auto *IdxTy = Type::getInt64Ty(II.getContext()); auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx)); Extract->insertBefore(&II); Extract->takeName(&II); return IC.replaceInstUsesWith(II, Extract); } static Optional instCombineRDFFR(InstCombiner &IC, IntrinsicInst &II) { LLVMContext &Ctx = II.getContext(); IRBuilder<> Builder(Ctx); Builder.SetInsertPoint(&II); // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr // can work with RDFFR_PP for ptest elimination. auto *AllPat = ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all); auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue, {II.getType()}, {AllPat}); auto *RDFFR = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue}); RDFFR->takeName(&II); return IC.replaceInstUsesWith(II, RDFFR); } static Optional instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) { const auto Pattern = cast(II.getArgOperand(0))->getZExtValue(); if (Pattern == AArch64SVEPredPattern::all) { LLVMContext &Ctx = II.getContext(); IRBuilder<> Builder(Ctx); Builder.SetInsertPoint(&II); Constant *StepVal = ConstantInt::get(II.getType(), NumElts); auto *VScale = Builder.CreateVScale(StepVal); VScale->takeName(&II); return IC.replaceInstUsesWith(II, VScale); } unsigned MinNumElts = 0; switch (Pattern) { default: return None; case AArch64SVEPredPattern::vl1: case AArch64SVEPredPattern::vl2: case AArch64SVEPredPattern::vl3: case AArch64SVEPredPattern::vl4: case AArch64SVEPredPattern::vl5: case AArch64SVEPredPattern::vl6: case AArch64SVEPredPattern::vl7: case AArch64SVEPredPattern::vl8: MinNumElts = Pattern; break; case AArch64SVEPredPattern::vl16: MinNumElts = 16; break; } return NumElts >= MinNumElts ? 
Optional(IC.replaceInstUsesWith( II, ConstantInt::get(II.getType(), MinNumElts))) : None; } static Optional instCombineSVEPTest(InstCombiner &IC, IntrinsicInst &II) { IntrinsicInst *Op1 = dyn_cast(II.getArgOperand(0)); IntrinsicInst *Op2 = dyn_cast(II.getArgOperand(1)); if (Op1 && Op2 && Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool && Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool && Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) { IRBuilder<> Builder(II.getContext()); Builder.SetInsertPoint(&II); Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)}; Type *Tys[] = {Op1->getArgOperand(0)->getType()}; auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops); PTest->takeName(&II); return IC.replaceInstUsesWith(II, PTest); } return None; } static Optional instCombineSVEVectorMul(InstCombiner &IC, IntrinsicInst &II) { auto *OpPredicate = II.getOperand(0); auto *OpMultiplicand = II.getOperand(1); auto *OpMultiplier = II.getOperand(2); IRBuilder<> Builder(II.getContext()); Builder.SetInsertPoint(&II); // Return true if a given instruction is an aarch64_sve_dup_x intrinsic call // with a unit splat value, false otherwise. auto IsUnitDupX = [](auto *I) { auto *IntrI = dyn_cast(I); if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x) return false; auto *SplatValue = IntrI->getOperand(0); return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); }; // Return true if a given instruction is an aarch64_sve_dup intrinsic call // with a unit splat value, false otherwise. auto IsUnitDup = [](auto *I) { auto *IntrI = dyn_cast(I); if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup) return false; auto *SplatValue = IntrI->getOperand(2); return match(SplatValue, m_FPOne()) || match(SplatValue, m_One()); }; // The OpMultiplier variable should always point to the dup (if any), so // swap if necessary. if (IsUnitDup(OpMultiplicand) || IsUnitDupX(OpMultiplicand)) std::swap(OpMultiplier, OpMultiplicand); if (IsUnitDupX(OpMultiplier)) { // [f]mul pg (dupx 1) %n => %n OpMultiplicand->takeName(&II); return IC.replaceInstUsesWith(II, OpMultiplicand); } else if (IsUnitDup(OpMultiplier)) { // [f]mul pg (dup pg 1) %n => %n auto *DupInst = cast(OpMultiplier); auto *DupPg = DupInst->getOperand(1); // TODO: this is naive. The optimization is still valid if DupPg // 'encompasses' OpPredicate, not only if they're the same predicate. if (OpPredicate == DupPg) { OpMultiplicand->takeName(&II); return IC.replaceInstUsesWith(II, OpMultiplicand); } } return None; } static Optional instCombineSVETBL(InstCombiner &IC, IntrinsicInst &II) { auto *OpVal = II.getOperand(0); auto *OpIndices = II.getOperand(1); VectorType *VTy = cast(II.getType()); // Check whether OpIndices is an aarch64_sve_dup_x intrinsic call with // constant splat value < minimal element count of result. auto *DupXIntrI = dyn_cast(OpIndices); if (!DupXIntrI || DupXIntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x) return None; auto *SplatValue = dyn_cast(DupXIntrI->getOperand(0)); if (!SplatValue || SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue())) return None; // Convert sve_tbl(OpVal sve_dup_x(SplatValue)) to // splat_vector(extractelement(OpVal, SplatValue)) for further optimization. 
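  // (The index bound check above matters: an out-of-range SVE TBL index reads
  //  as zero, whereas an out-of-range extractelement would be poison, so the
  //  fold is only safe for indices below the known minimum vector length.)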
IRBuilder<> Builder(II.getContext()); Builder.SetInsertPoint(&II); auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue); auto *VectorSplat = Builder.CreateVectorSplat(VTy->getElementCount(), Extract); VectorSplat->takeName(&II); return IC.replaceInstUsesWith(II, VectorSplat); } Optional AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const { Intrinsic::ID IID = II.getIntrinsicID(); switch (IID) { default: break; case Intrinsic::aarch64_sve_convert_from_svbool: return instCombineConvertFromSVBool(IC, II); case Intrinsic::aarch64_sve_dup: return instCombineSVEDup(IC, II); case Intrinsic::aarch64_sve_cmpne: case Intrinsic::aarch64_sve_cmpne_wide: return instCombineSVECmpNE(IC, II); case Intrinsic::aarch64_sve_rdffr: return instCombineRDFFR(IC, II); case Intrinsic::aarch64_sve_lasta: case Intrinsic::aarch64_sve_lastb: return instCombineSVELast(IC, II); case Intrinsic::aarch64_sve_cntd: return instCombineSVECntElts(IC, II, 2); case Intrinsic::aarch64_sve_cntw: return instCombineSVECntElts(IC, II, 4); case Intrinsic::aarch64_sve_cnth: return instCombineSVECntElts(IC, II, 8); case Intrinsic::aarch64_sve_cntb: return instCombineSVECntElts(IC, II, 16); case Intrinsic::aarch64_sve_ptest_any: case Intrinsic::aarch64_sve_ptest_first: case Intrinsic::aarch64_sve_ptest_last: return instCombineSVEPTest(IC, II); case Intrinsic::aarch64_sve_mul: case Intrinsic::aarch64_sve_fmul: return instCombineSVEVectorMul(IC, II); case Intrinsic::aarch64_sve_tbl: return instCombineSVETBL(IC, II); } return None; } bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode, ArrayRef Args) { // A helper that returns a vector type from the given type. The number of // elements in type Ty determine the vector width. auto toVectorTy = [&](Type *ArgTy) { return VectorType::get(ArgTy->getScalarType(), cast(DstTy)->getElementCount()); }; // Exit early if DstTy is not a vector type whose elements are at least // 16-bits wide. if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16) return false; // Determine if the operation has a widening variant. We consider both the // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the // instructions. // // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we // verify that their extending operands are eliminated during code // generation. switch (Opcode) { case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2). case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2). break; default: return false; } // To be a widening instruction (either the "wide" or "long" versions), the // second operand must be a sign- or zero extend having a single user. We // only consider extends having a single user because they may otherwise not // be eliminated. if (Args.size() != 2 || (!isa(Args[1]) && !isa(Args[1])) || !Args[1]->hasOneUse()) return false; auto *Extend = cast(Args[1]); // Legalize the destination type and ensure it can be used in a widening // operation. auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy); unsigned DstElTySize = DstTyL.second.getScalarSizeInBits(); if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits()) return false; // Legalize the source type and ensure it can be used in a widening // operation. 
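  // (For reference, a widening match here means something like
  //    add <8 x i16> %a, (zext <8 x i8> %b to <8 x i16>)
  //  which can be selected as a single uaddw, making the extend itself free
  //  in getCastInstrCost below.)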
auto *SrcTy = toVectorTy(Extend->getSrcTy()); auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy); unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits(); if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits()) return false; // Get the total number of vector elements in the legalized types. InstructionCost NumDstEls = DstTyL.first * DstTyL.second.getVectorMinNumElements(); InstructionCost NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorMinNumElements(); // Return true if the legalized types have the same number of vector elements // and the destination element type size is twice that of the source type. return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize; } InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind, const Instruction *I) { int ISD = TLI->InstructionOpcodeToISD(Opcode); assert(ISD && "Invalid opcode"); // If the cast is observable, and it is used by a widening instruction (e.g., // uaddl, saddw, etc.), it may be free. if (I && I->hasOneUse()) { auto *SingleUser = cast(*I->user_begin()); SmallVector Operands(SingleUser->operand_values()); if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) { // If the cast is the second operand, it is free. We will generate either // a "wide" or "long" version of the widening instruction. if (I == SingleUser->getOperand(1)) return 0; // If the cast is not the second operand, it will be free if it looks the // same as the second operand. In this case, we will generate a "long" // version of the widening instruction. if (auto *Cast = dyn_cast(SingleUser->getOperand(1))) if (I->getOpcode() == unsigned(Cast->getOpcode()) && cast(I)->getSrcTy() == Cast->getSrcTy()) return 0; } } // TODO: Allow non-throughput costs that aren't binary. auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { if (CostKind != TTI::TCK_RecipThroughput) return Cost == 0 ? 0 : 1; return Cost; }; EVT SrcTy = TLI->getValueType(DL, Src); EVT DstTy = TLI->getValueType(DL, Dst); if (!SrcTy.isSimple() || !DstTy.isSimple()) return AdjustCost( BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); static const TypeConversionCostTblEntry ConversionTbl[] = { { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 }, { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, // Truncations on nxvmiN { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 }, { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 }, { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 }, { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 }, { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 }, { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 }, { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 }, { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 }, { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 }, { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 }, { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 }, { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 }, { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 }, { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 }, { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 }, { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 }, // The number of shll instructions for the extension. 
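    // (e.g. a zext from v8i8 to v8i32 is one ushll to v8i16 followed by
    //  ushll/ushll2 into two v4i32 halves, i.e. the 3 shll-class instructions
    //  costed in the v8i32/v8i8 entries below.)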
{ ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 }, { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 }, { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 }, // LowerVectorINT_TO_FP: { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, // Complex: to v2f32 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3 }, { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 }, { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 }, // Complex: to v4f32 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 4 }, { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, // Complex: to v8f32 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10 }, { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, // Complex: to v16f32 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 }, // Complex: to v2f64 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 4 }, { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 }, { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 }, // LowerVectorFP_TO_INT { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 }, { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext). { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 }, { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 }, { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 1 }, { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 }, { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 }, { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 1 }, // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 2 }, { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 2 }, // Complex, from nxv2f32. 
{ ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f32, 1 }, // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2. { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 }, { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 2 }, { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }, { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 2 }, // Complex, from nxv2f64. { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f64, 1 }, // Complex, from nxv4f32. { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 }, { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 }, { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f32, 1 }, // Complex, from nxv8f64. Illegal -> illegal conversions not required. { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 }, { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f64, 7 }, // Complex, from nxv4f64. Illegal -> illegal conversions not required. { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 }, { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 }, { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f64, 3 }, // Complex, from nxv8f32. Illegal -> illegal conversions not required. { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 }, { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f32, 3 }, // Complex, from nxv8f16. { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, { ISD::FP_TO_SINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 }, { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 }, { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv8i8, MVT::nxv8f16, 1 }, // Complex, from nxv4f16. { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, { ISD::FP_TO_SINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 }, { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv4i8, MVT::nxv4f16, 1 }, // Complex, from nxv2f16. 
{ ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, { ISD::FP_TO_SINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 }, { ISD::FP_TO_UINT, MVT::nxv2i8, MVT::nxv2f16, 1 }, // Truncate from nxvmf32 to nxvmf16. { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 }, { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 }, { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 }, // Truncate from nxvmf64 to nxvmf16. { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 }, { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 }, { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 }, // Truncate from nxvmf64 to nxvmf32. { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 }, { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 }, { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 }, // Extend from nxvmf16 to nxvmf32. { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1}, { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1}, { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2}, // Extend from nxvmf16 to nxvmf64. { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1}, { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2}, { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4}, // Extend from nxvmf32 to nxvmf64. { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1}, { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2}, { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6}, }; if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT())) return AdjustCost(Entry->Cost); return AdjustCost( BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); } InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) { // Make sure we were given a valid extend opcode. assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) && "Invalid opcode"); // We are extending an element we extract from a vector, so the source type // of the extend is the element type of the vector. auto *Src = VecTy->getElementType(); // Sign- and zero-extends are for integer types only. assert(isa(Dst) && isa(Src) && "Invalid type"); // Get the cost for the extract. We compute the cost (if any) for the extend // below. InstructionCost Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index); // Legalize the types. auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy); auto DstVT = TLI->getValueType(DL, Dst); auto SrcVT = TLI->getValueType(DL, Src); TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; // If the resulting type is still a vector and the destination type is legal, // we may get the extension for free. If not, get the default cost for the // extend. if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT)) return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, CostKind); // The destination type should be larger than the element type. If not, get // the default cost for the extend. if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits()) return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None, CostKind); switch (Opcode) { default: llvm_unreachable("Opcode should be either SExt or ZExt"); // For sign-extends, we only need a smov, which performs the extension // automatically. 
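  // (e.g. smov w0, v0.h[3] both extracts the lane and sign-extends it, so no
  //  separate sxth is required.)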
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                 CostKind);
}

InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
  // Branches are assumed to be predicted.
  return 0;
}

InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo, Opd2PropInfo,
                                         Args, CxtI);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  InstructionCost Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info, Opd1PropInfo,
                                                Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by a power-of-two constant is
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties may not be the same as those of the
      // previous operation; conservatively assume OP_None.
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Opd1Info,
                                     Opd2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Opd1Info,
                                     Opd2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Opd1Info,
                                     Opd2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by constants is expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHU + SUB + SRL + ADD + SRL.
        InstructionCost MulCost = getArithmeticInstrCost(
            Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost AddCost = getArithmeticInstrCost(
            Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        InstructionCost ShrCost = getArithmeticInstrCost(
            Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
            TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                          Opd2Info, Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;
  case ISD::MUL:
    if (LT.second != MVT::v2i64)
      return (Cost + 1) * LT.first;
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // as elements are extracted from the vectors and the muls scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for a i64 vector directly here, which is:
    // - four i64 extracts,
    // - two i64 inserts, and
    // - two muls.
    // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
    // LT.first = 2 the cost is 16.
    return LT.first * 8;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;
  case ISD::FADD:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
if (isa(Ty) && !Ty->getScalarType()->isFP128Ty()) return (Cost + 2) * LT.first; return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo); } } InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE, const SCEV *Ptr) { // Address computations in vectorized code with non-consecutive addresses will // likely result in more instructions compared to scalar code where the // computation can more often be merged into the index mode. The resulting // extra micro-ops can significantly decrease throughput. unsigned NumVectorInstToHideOverhead = 10; int MaxMergeDistance = 64; if (Ty->isVectorTy() && SE && !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1)) return NumVectorInstToHideOverhead; // In many cases the address computation is not merged into the instruction // addressing mode. return 1; } InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred, TTI::TargetCostKind CostKind, const Instruction *I) { // TODO: Handle other cost kinds. if (CostKind != TTI::TCK_RecipThroughput) return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); int ISD = TLI->InstructionOpcodeToISD(Opcode); // We don't lower some vector selects well that are wider than the register // width. if (isa(ValTy) && ISD == ISD::SELECT) { // We would need this many instructions to hide the scalarization happening. const int AmortizationCost = 20; // If VecPred is not set, check if we can get a predicate from the context // instruction, if its type matches the requested ValTy. if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) { CmpInst::Predicate CurrentPred; if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(), m_Value()))) VecPred = CurrentPred; } // Check if we have a compare/select chain that can be lowered using CMxx & // BFI pair. if (CmpInst::isIntPredicate(VecPred)) { static const auto ValidMinMaxTys = {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32, MVT::v4i32, MVT::v2i64}; auto LT = TLI->getTypeLegalizationCost(DL, ValTy); if (any_of(ValidMinMaxTys, [<](MVT M) { return M == LT.second; })) return LT.first; } static const TypeConversionCostTblEntry VectorSelectTbl[] = { { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 }, { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 }, { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 }, { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost }, { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost }, { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost } }; EVT SelCondTy = TLI->getValueType(DL, CondTy); EVT SelValTy = TLI->getValueType(DL, ValTy); if (SelCondTy.isSimple() && SelValTy.isSimple()) { if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(), SelValTy.getSimpleVT())) return Entry->Cost; } } // The base case handles scalable vectors fine for now, since it treats the // cost as 1 * legalization cost. return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); } AArch64TTIImpl::TTI::MemCmpExpansionOptions AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { TTI::MemCmpExpansionOptions Options; if (ST->requiresStrictAlign()) { // TODO: Add cost modeling for strict align. Misaligned loads expand to // a bunch of instructions when strict align is enabled. 
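    // (Returning the default-constructed, i.e. disabled, options here leaves
    //  memcmp calls to the library under strict alignment, since the
    //  expansion configured below relies on potentially unaligned 8/4/2/1
    //  byte loads.)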
    return Options;
  }
  Options.AllowOverlappingLoads = true;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, in some targets
  // they may wake up the FP unit, which raises the power consumption. Perhaps
  // they could be used with no holds barred (-O3).
  Options.LoadSizes = {8, 4, 2, 1};
  return Options;
}

InstructionCost
AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                      Align Alignment, unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  if (!isa<ScalableVectorType>(Src))
    return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                        CostKind);
  auto LT = TLI->getTypeLegalizationCost(DL, Src);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
    return InstructionCost::getInvalid();

  return LT.first * 2;
}

InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {

  if (!isa<ScalableVectorType>(DataTy))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  auto *VT = cast<VectorType>(DataTy);
  auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (cast<VectorType>(DataTy)->getElementCount() ==
      ElementCount::getScalable(1))
    return InstructionCost::getInvalid();

  ElementCount LegalVF = LT.second.getVectorElementCount();
  InstructionCost MemOpCost =
      getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
  return LT.first * MemOpCost * getMaxNumElements(LegalVF);
}

bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
  return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
}

InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                const Instruction *I) {
  EVT VT = TLI->getValueType(DL, Ty, true);
  // Type legalization can't handle structs.
  if (VT == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
                                  CostKind);

  auto LT = TLI->getTypeLegalizationCost(DL, Ty);
  if (!LT.first.isValid())
    return InstructionCost::getInvalid();

  // The code-generator is currently not able to handle scalable vectors
  // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
  if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
    if (VTy->getElementCount() == ElementCount::getScalable(1))
      return InstructionCost::getInvalid();

  // TODO: consider latency as well for TCK_SizeAndLatency.
  if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
    return LT.first;

  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has shown
    // in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  // Check truncating stores and extending loads.
  if (useNeonVector(Ty) &&
      Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
    // v4i8 types are lowered to a scalar load/store and sshll/xtn.
    if (VT == MVT::v4i8)
      return 2;
    // Otherwise we need to scalarize.
    return cast<FixedVectorType>(Ty)->getNumElements() * 2;
  }

  return LT.first;
}

InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  auto *VecVTy = cast<FixedVectorType>(VecTy);

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecVTy->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

InstructionCost
AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  InstructionCost Cost = 0;
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() *
            cast<FixedVectorType>(I)->getNumElements() ==
        128)
      Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
              getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources. We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
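        // (Stopping the count early is fine: once more than
        //  MaxStridedLoads / 2 loads have been seen, the clamp below already
        //  degenerates to MaxCount == 1; e.g. three strided loads give
        //  MaxCount == 1 << Log2_32(7 / 3) == 2.)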
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // For inner loop, it is more likely to be a hot one, and the runtime check
  // can be promoted out from LICM pass, so the overhead is less, let's try
  // a larger threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining. Don't unroll vector loops either, as they don't benefit much
  // from unrolling.
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loops.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }
    }
  }

  // Enable runtime unrolling for in-order models.
  // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so
  // by checking for that case, we can ensure that the default behaviour is
  // unchanged.
  if (ST->getProcFamily() != AArch64Subtarget::Others &&
      !ST->getSchedModel().isOutOfOrder()) {
    UP.Runtime = true;
    UP.Partial = true;
    UP.UpperBound = true;
    UP.UnrollRemainder = true;
    UP.DefaultUnrollRuntimeCount = 4;

    UP.UnrollAndJam = true;
    UP.UnrollAndJamInnerLoopThreshold = 60;
  }
}

void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}
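// Example for getOrCreateResultFromMemIntrinsic above: given
// ExpectedType == { <4 x i32>, <4 x i32> }, the two data operands of an
// aarch64.neon.st2 call with <4 x i32> operands are packed into that struct
// via insertvalue, while for aarch64.neon.ld2..ld4 the intrinsic's own result
// is reused directly when its type already matches.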
bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type and used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered as "complex" if it has more than 2
      // operands. We will promote a SExt used in such complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

bool AArch64TTIImpl::isLegalToVectorizeReduction(
    const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
  if (!VF.isScalable())
    return true;

  Type *Ty = RdxDesc.getRecurrenceType();
  if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
    return false;

  switch (RdxDesc.getRecurrenceKind()) {
  case RecurKind::Add:
  case RecurKind::FAdd:
  case RecurKind::And:
  case RecurKind::Or:
  case RecurKind::Xor:
  case RecurKind::SMin:
  case RecurKind::SMax:
  case RecurKind::UMin:
  case RecurKind::UMax:
  case RecurKind::FMin:
  case RecurKind::FMax:
    return true;
  default:
    return false;
  }
}
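// For the scalable min/max reduction cost below: a reduction whose type
// legalizes in a single step (e.g. an smax reduction over nxv4i32) is costed
// at 2 for the horizontal reduction, while a type needing N legalization
// steps first pays (N - 1) compare + select pairs on the legalized type.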
InstructionCost
AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                       bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  if (!isa<ScalableVectorType>(Ty))
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
  assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
         "Both vectors need to be scalable");

  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
    unsigned CmpOpcode =
        Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
    LegalizationCost =
        getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  return LegalizationCost + /*Cost of horizontal reduction*/ 2;
}

InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
    unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  InstructionCost LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
    LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  // Add the final reduction cost for the legal horizontal reduction.
  switch (ISD) {
  case ISD::ADD:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::FADD:
    return LegalizationCost + 2;
  default:
    return InstructionCost::getInvalid();
  }
}

InstructionCost
AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                           Optional<FastMathFlags> FMF,
                                           TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF)) {
    if (!isa<ScalableVectorType>(ValTy))
      return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);

    if (Opcode != Instruction::FAdd)
      return InstructionCost::getInvalid();

    auto *VTy = cast<ScalableVectorType>(ValTy);
    InstructionCost Cost =
        getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
    Cost *= getMaxNumElements(VTy->getElementCount());
    return Cost;
  }

  if (isa<ScalableVectorType>(ValTy))
    return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);

  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  MVT MTy = LT.second;
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Horizontal adds can use the 'addv' instruction. We model the cost of these
  // instructions as twice a normal vector add, plus 1 for each legalization
  // step (LT.first). This is the only arithmetic vector reduction operation
  // for which we have an instruction.
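  // For example, an add reduction of <4 x i32> maps to the v4i32 ADD entry
  // below (cost 2); a <8 x i32> reduction legalizes into two halves, so it
  // pays one extra legalization step: (LT.first - 1) + 2 = 3.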
  // OR, XOR and AND costs should match the codegen from:
  // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
  // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
  // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
  static const CostTblEntry CostTblNoPairwise[]{
      {ISD::ADD, MVT::v8i8,   2},
      {ISD::ADD, MVT::v16i8,  2},
      {ISD::ADD, MVT::v4i16,  2},
      {ISD::ADD, MVT::v8i16,  2},
      {ISD::ADD, MVT::v4i32,  2},
      {ISD::OR,  MVT::v8i8,  15},
      {ISD::OR,  MVT::v16i8, 17},
      {ISD::OR,  MVT::v4i16,  7},
      {ISD::OR,  MVT::v8i16,  9},
      {ISD::OR,  MVT::v2i32,  3},
      {ISD::OR,  MVT::v4i32,  5},
      {ISD::OR,  MVT::v2i64,  3},
      {ISD::XOR, MVT::v8i8,  15},
      {ISD::XOR, MVT::v16i8, 17},
      {ISD::XOR, MVT::v4i16,  7},
      {ISD::XOR, MVT::v8i16,  9},
      {ISD::XOR, MVT::v2i32,  3},
      {ISD::XOR, MVT::v4i32,  5},
      {ISD::XOR, MVT::v2i64,  3},
      {ISD::AND, MVT::v8i8,  15},
      {ISD::AND, MVT::v16i8, 17},
      {ISD::AND, MVT::v4i16,  7},
      {ISD::AND, MVT::v8i16,  9},
      {ISD::AND, MVT::v2i32,  3},
      {ISD::AND, MVT::v4i32,  5},
      {ISD::AND, MVT::v2i64,  3},
  };
  switch (ISD) {
  default:
    break;
  case ISD::ADD:
    if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
      return (LT.first - 1) + Entry->Cost;
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR:
    const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
    if (!Entry)
      break;
    auto *ValVTy = cast<FixedVectorType>(ValTy);
    if (!ValVTy->getElementType()->isIntegerTy(1) &&
        MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
        isPowerOf2_32(ValVTy->getNumElements())) {
      InstructionCost ExtraCost = 0;
      if (LT.first != 1) {
        // Type needs to be split, so there is an extra cost of LT.first - 1
        // arithmetic ops.
        auto *Ty = FixedVectorType::get(ValTy->getElementType(),
                                        MTy.getVectorNumElements());
        ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
        ExtraCost *= LT.first - 1;
      }
      return Entry->Cost + ExtraCost;
    }
    break;
  }
  return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
}

InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
  static const CostTblEntry ShuffleTbl[] = {
      { TTI::SK_Splice, MVT::nxv16i8,  1 },
      { TTI::SK_Splice, MVT::nxv8i16,  1 },
      { TTI::SK_Splice, MVT::nxv4i32,  1 },
      { TTI::SK_Splice, MVT::nxv2i64,  1 },
      { TTI::SK_Splice, MVT::nxv2f16,  1 },
      { TTI::SK_Splice, MVT::nxv4f16,  1 },
      { TTI::SK_Splice, MVT::nxv8f16,  1 },
      { TTI::SK_Splice, MVT::nxv2bf16, 1 },
      { TTI::SK_Splice, MVT::nxv4bf16, 1 },
      { TTI::SK_Splice, MVT::nxv8bf16, 1 },
      { TTI::SK_Splice, MVT::nxv2f32,  1 },
      { TTI::SK_Splice, MVT::nxv4f32,  1 },
      { TTI::SK_Splice, MVT::nxv2f64,  1 },
  };

  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
  Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  EVT PromotedVT = LT.second.getScalarType() == MVT::i1
                       ? TLI->getPromotedVTForPredicate(EVT(LT.second))
                       : LT.second;
  Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
  InstructionCost LegalizationCost = 0;
  if (Index < 0) {
    LegalizationCost =
        getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
  }

  // Predicated splices are promoted when lowering; see AArch64ISelLowering.cpp.
  // The cost is computed on the promoted type.
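  // For example, a splice of two nxv16i1 predicates is costed on the promoted
  // nxv16i8 type (assuming that is what getPromotedVTForPredicate returns for
  // it): the zext/trunc pair added below plus the single splice table entry.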
  if (LT.second.getScalarType() == MVT::i1) {
    LegalizationCost +=
        getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
                         TTI::CastContextHint::None, CostKind) +
        getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
                         TTI::CastContextHint::None, CostKind);
  }
  const auto *Entry = CostTableLookup(ShuffleTbl, TTI::SK_Splice,
                                      PromotedVT.getSimpleVT());
  assert(Entry && "Illegal Type for Splice");
  LegalizationCost += Entry->Cost;
  return LegalizationCost * LT.first;
}

InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                               VectorType *Tp,
                                               ArrayRef<int> Mask, int Index,
                                               VectorType *SubTp) {
  Kind = improveShuffleKindFromMask(Kind, Mask);
  if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
      Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
      Kind == TTI::SK_Reverse) {
    static const CostTblEntry ShuffleTbl[] = {
      // Broadcast shuffle kinds can be performed with 'dup'.
      { TTI::SK_Broadcast, MVT::v8i8,  1 },
      { TTI::SK_Broadcast, MVT::v16i8, 1 },
      { TTI::SK_Broadcast, MVT::v4i16, 1 },
      { TTI::SK_Broadcast, MVT::v8i16, 1 },
      { TTI::SK_Broadcast, MVT::v2i32, 1 },
      { TTI::SK_Broadcast, MVT::v4i32, 1 },
      { TTI::SK_Broadcast, MVT::v2i64, 1 },
      { TTI::SK_Broadcast, MVT::v2f32, 1 },
      { TTI::SK_Broadcast, MVT::v4f32, 1 },
      { TTI::SK_Broadcast, MVT::v2f64, 1 },
      // Transpose shuffle kinds can be performed with 'trn1/trn2' and
      // 'zip1/zip2' instructions.
      { TTI::SK_Transpose, MVT::v8i8,  1 },
      { TTI::SK_Transpose, MVT::v16i8, 1 },
      { TTI::SK_Transpose, MVT::v4i16, 1 },
      { TTI::SK_Transpose, MVT::v8i16, 1 },
      { TTI::SK_Transpose, MVT::v2i32, 1 },
      { TTI::SK_Transpose, MVT::v4i32, 1 },
      { TTI::SK_Transpose, MVT::v2i64, 1 },
      { TTI::SK_Transpose, MVT::v2f32, 1 },
      { TTI::SK_Transpose, MVT::v4f32, 1 },
      { TTI::SK_Transpose, MVT::v2f64, 1 },
      // Select shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
      // PermuteSingleSrc shuffle kinds.
      { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 },  // constpool + load + tbl.
      { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 },  // constpool + load + tbl.
      { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl.
      { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 },   // constpool + load + tbl.
      { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 },  // constpool + load + tbl.
      // Reverse can be lowered with 'rev'.
      { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT.
      { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT.
      { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
      // Broadcast shuffle kinds for scalable vectors.
      { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
      { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
      { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
      { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
      { TTI::SK_Broadcast, MVT::nxv2f16,  1 },
      { TTI::SK_Broadcast, MVT::nxv4f16,  1 },
      { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
      { TTI::SK_Broadcast, MVT::nxv2bf16, 1 },
      { TTI::SK_Broadcast, MVT::nxv4bf16, 1 },
      { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
      { TTI::SK_Broadcast, MVT::nxv2f32,  1 },
      { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
      { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
      { TTI::SK_Broadcast, MVT::nxv16i1,  1 },
      { TTI::SK_Broadcast, MVT::nxv8i1,   1 },
      { TTI::SK_Broadcast, MVT::nxv4i1,   1 },
      { TTI::SK_Broadcast, MVT::nxv2i1,   1 },
      // Handle the cases for vector.reverse with scalable vectors.
      { TTI::SK_Reverse, MVT::nxv16i8,  1 },
      { TTI::SK_Reverse, MVT::nxv8i16,  1 },
      { TTI::SK_Reverse, MVT::nxv4i32,  1 },
      { TTI::SK_Reverse, MVT::nxv2i64,  1 },
      { TTI::SK_Reverse, MVT::nxv2f16,  1 },
      { TTI::SK_Reverse, MVT::nxv4f16,  1 },
      { TTI::SK_Reverse, MVT::nxv8f16,  1 },
      { TTI::SK_Reverse, MVT::nxv2bf16, 1 },
      { TTI::SK_Reverse, MVT::nxv4bf16, 1 },
      { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
      { TTI::SK_Reverse, MVT::nxv2f32,  1 },
      { TTI::SK_Reverse, MVT::nxv4f32,  1 },
      { TTI::SK_Reverse, MVT::nxv2f64,  1 },
      { TTI::SK_Reverse, MVT::nxv16i1,  1 },
      { TTI::SK_Reverse, MVT::nxv8i1,   1 },
      { TTI::SK_Reverse, MVT::nxv4i1,   1 },
      { TTI::SK_Reverse, MVT::nxv2i1,   1 },
    };
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  }

  if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
    return getSpliceCost(Tp, Index);

  return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}
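// Illustrative examples for the shuffle cost table above: a reverse of
// <4 x float> is costed at 2 (REV64 + EXT), while an arbitrary single-source
// permute of <4 x i32> such as
//   %r = shufflevector <4 x i32> %v, <4 x i32> undef,
//                      <4 x i32> <i32 2, i32 0, i32 3, i32 1>
// is costed at 3 (the perfect-shuffle worst case).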