//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"

#ifdef EXPENSIVE_CHECKS
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#endif

#define DEBUG_TYPE "isel"

using namespace llvm;

namespace llvm {
class R600InstrInfo;
} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

static bool isNullConstantOrUndef(SDValue V) {
  if (V.isUndef())
    return true;

  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

static bool getConstantValue(SDValue N, uint32_t &Out) {
  // This is only used for packed vectors, where using 0 for undef should
  // always be good.
  if (N.isUndef()) {
    Out = 0;
    return true;
  }

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    Out = C->getAPIntValue().getSExtValue();
    return true;
  }

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
    Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
    return true;
  }

  return false;
}

// TODO: Handle undef as zero
static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
                                 bool Negate = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
  uint32_t LHSVal, RHSVal;
  if (getConstantValue(N->getOperand(0), LHSVal) &&
      getConstantValue(N->getOperand(1), RHSVal)) {
    SDLoc SL(N);
    uint32_t K = Negate ? (-LHSVal & 0xffff) | (-RHSVal << 16)
                        : (LHSVal & 0xffff) | (RHSVal << 16);
    return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
                              DAG.getTargetConstant(K, SL, MVT::i32));
  }

  return nullptr;
}

static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
  return packConstantV2I16(N, DAG, true);
}
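
// For example, packConstantV2I16 turns a v2i16 build_vector of the constants
// (1, 2) into the single 32-bit immediate 0x00020001 (element 0 in bits
// [15:0], element 1 in bits [31:16]), which a lone S_MOV_B32 can materialize.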

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;

  // Default FP mode for the current function.
  AMDGPU::SIModeRegisterDefaults Mode;

  bool EnableLateStructurizeCFG;

  // Instructions that will be lowered with a final instruction that zeros the
  // high result bits.
  bool fp16SrcZerosHighBits(unsigned Opc) const;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
      : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
#endif
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
  bool isNegInlineImmediate(const SDNode *N) const {
    return isInlineImmediate(N, true);
  }

  bool isInlineImmediate16(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral16(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate32(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral32(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate64(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral64(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate(const APFloat &Imm) const {
    return Subtarget->getInstrInfo()->isInlineConstant(Imm);
  }

  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  bool isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                  SDValue &RHS) const;

  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  SDNode *glueCopyToOp(SDNode *N, SDValue NewChain, SDValue Glue) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
  SDNode *glueCopyToM0LDSInit(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N,
                                                unsigned OpNo) const;
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                  SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                  SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset) const;
  bool isDSOffset2Legal(SDValue Base, unsigned Offset0, unsigned Offset1,
                        unsigned Size) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectDS128Bit8ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                  SDValue &Offset1) const;
  bool SelectDSReadWrite2(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                          SDValue &Offset1, unsigned Size) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent, SDValue Addr, SDValue &RSrc,
                               SDValue &VAddr, SDValue &SOffset,
                               SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent, SDValue Addr, SDValue &SRsrc,
                                SDValue &Soffset, SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  bool SelectFlatOffsetImpl(SDNode *N, SDValue Addr, SDValue &VAddr,
                            SDValue &Offset, uint64_t FlatVariant) const;
  bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset) const;
  bool SelectGlobalOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                          SDValue &Offset) const;
  bool SelectScratchOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                           SDValue &Offset) const;
  bool SelectGlobalSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
                         SDValue &VOffset, SDValue &Offset) const;
  bool SelectScratchSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
                          SDValue &Offset) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods,
                          bool AllowAbs = true) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3BMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3BMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src, SDValue &Clamp,
                       SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                 unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;

  SDValue getMaterializedScalarImm32(int64_t Val, const SDLoc &DL) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectAddcSubb(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
  void SelectDS_GWS(SDNode *N, unsigned IntrID);
  void SelectInterpP1F16(SDNode *N);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);
  void SelectINTRINSIC_WO_CHAIN(SDNode *N);
  void SelectINTRINSIC_VOID(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc" }; class R600DAGToDAGISel : public AMDGPUDAGToDAGISel { const R600Subtarget *Subtarget; bool isConstantLoad(const MemSDNode *N, int cbID) const; bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr); bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg, SDValue& Offset); public: explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) : AMDGPUDAGToDAGISel(TM, OptLevel) {} void Select(SDNode *N) override; bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset) override; bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset) override; bool runOnMachineFunction(MachineFunction &MF) override; void PreprocessISelDAG() override {} protected: // Include the pieces autogenerated from the target description. #include "R600GenDAGISel.inc" }; static SDValue stripBitcast(SDValue Val) { return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val; } // Figure out if this is really an extract of the high 16-bits of a dword. static bool isExtractHiElt(SDValue In, SDValue &Out) { In = stripBitcast(In); if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { if (ConstantSDNode *Idx = dyn_cast(In.getOperand(1))) { if (!Idx->isOne()) return false; Out = In.getOperand(0); return true; } } if (In.getOpcode() != ISD::TRUNCATE) return false; SDValue Srl = In.getOperand(0); if (Srl.getOpcode() == ISD::SRL) { if (ConstantSDNode *ShiftAmt = dyn_cast(Srl.getOperand(1))) { if (ShiftAmt->getZExtValue() == 16) { Out = stripBitcast(Srl.getOperand(0)); return true; } } } return false; } // Look through operations that obscure just looking at the low 16-bits of the // same register. static SDValue stripExtractLoElt(SDValue In) { if (In.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { if (ConstantSDNode *Idx = dyn_cast(In.getOperand(1))) { if (Idx->isNullValue() && In.getValueSizeInBits() <= 32) return In.getOperand(0); } } if (In.getOpcode() == ISD::TRUNCATE) { SDValue Src = In.getOperand(0); if (Src.getValueType().getSizeInBits() == 32) return stripBitcast(Src); } return In; } } // end anonymous namespace INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel", "AMDGPU DAG->DAG Pattern Instruction Selection", false, false) INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo) INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis) INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis) #ifdef EXPENSIVE_CHECKS INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) #endif INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel", "AMDGPU DAG->DAG Pattern Instruction Selection", false, false) /// This pass converts a legalized DAG into a AMDGPU-specific // DAG, ready for instruction scheduling. FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM, CodeGenOpt::Level OptLevel) { return new AMDGPUDAGToDAGISel(TM, OptLevel); } /// This pass converts a legalized DAG into a R600-specific // DAG, ready for instruction scheduling. 
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool AMDGPUDAGToDAGISel::fp16SrcZerosHighBits(unsigned Opc) const {
  // XXX - only need to list legal operations.
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FCANONICALIZE:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::LDEXP:
    // On gfx10, all 16-bit instructions preserve the high bits.
    return Subtarget->getGeneration() <= AMDGPUSubtarget::GFX9;
  case ISD::FP_ROUND:
    // We may select fptrunc (fma/mad) to mad_mixlo, which does not zero the
    // high bits on gfx9.
    // TODO: If we had the source node we could see if the source was fma/mad
    return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  case ISD::FMA:
  case ISD::FMAD:
  case AMDGPUISD::DIV_FIXUP:
    return Subtarget->getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}
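
// For example, on subtargets where d16 loads preserve the unused half of a
// 32-bit register, the pair
//   (v2f16 (build_vector x, (f16 (load ptr))))
// can become a single load_d16_hi that writes only the high half of the
// register already holding x, as matched below.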
bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->d16PreservesUnusedBits())
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    switch (N->getOpcode()) {
    case ISD::BUILD_VECTOR:
      MadeChange |= matchLoadD16FromBuildVector(N);
      break;
    default:
      break;
    }
  }

  if (MadeChange) {
    CurDAG->RemoveDeadNodes();
    LLVM_DEBUG(dbgs() << "After PreProcess:\n";
               CurDAG->dump(););
  }
}

bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
  if (TM.Options.NoNaNsFPMath)
    return true;

  // TODO: Move into isKnownNeverNaN
  if (N->getFlags().hasNoNaNs())
    return true;

  return CurDAG->isKnownNeverNaN(N);
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                           bool Negated) const {
  if (N->isUndef())
    return true;

  const SIInstrInfo *TII = Subtarget->getInstrInfo();
  if (Negated) {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(-C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());

  } else {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
  }

  return false;
}
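
// isInlineImmediate above accepts exactly the operands that need no extra
// literal dword: small integers in [-16, 64] and a handful of FP constants
// such as +-0.5, +-1.0, +-2.0 and +-4.0, plus 1/(2*pi) on subtargets with
// hasInv2PiInlineImm().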

/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *
AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N, unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Reg.isVirtual()) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI =
          static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
                                         SDValue Glue) const {
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(NewChain); // Replace the chain.
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  Ops.push_back(Glue);
  return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");

  SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
  return glueCopyToOp(N, M0, M0.getValue(1));
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
  if (AS == AMDGPUAS::LOCAL_ADDRESS) {
    if (Subtarget->ldsRequiresM0Init())
      return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
  } else if (AS == AMDGPUAS::REGION_ADDRESS) {
    MachineFunction &MF = CurDAG->getMachineFunction();
    unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
    return
        glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
  }
  return N;
}

MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
                                                  EVT VT) const {
  SDNode *Lo = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
  SDNode *Hi = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};

  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
}
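
// For example, a v2i32 build_vector (%x, %y) selects below to
//   REG_SEQUENCE RC, %x, sub0, %y, sub1
// i.e. one (value, subreg index) operand pair per element following the
// register class operand.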
void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
  EVT VT = N->getValueType(0);
  unsigned NumVectorElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc DL(N);
  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

  if (NumVectorElts == 1) {
    CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                         RegClass);
    return;
  }

  assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
                                "supported yet");
  // 32 = Max Num Vector Elements
  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
  // 1 = Vector Register Class
  SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

  bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
               Triple::amdgcn;
  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    // XXX: Why is this here?
    if (isa<RegisterSDNode>(N->getOperand(i))) {
      IsRegSeq = false;
      break;
    }
    unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                         : R600RegisterInfo::getSubRegFromChannel(i);
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
    RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
  }
  if (NOps != NumVectorElts) {
    // Fill in the missing undef elements if this was a scalar_to_vector.
    assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
    MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   DL, EltVT);
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                           : R600RegisterInfo::getSubRegFromChannel(i);
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(Sub, DL, MVT::i32);
    }
  }

  if (!IsRegSeq)
    SelectCode(N);
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
}

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return; // Already selected.
  }

  // isa<MemSDNode> almost works but is slightly too permissive for some DS
  // intrinsics.
  if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
    N = glueCopyToM0LDSInit(N);
    SelectCode(N);
    return;
  }

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lower it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
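  // For example, the i64 add computing the address of a global load can stay
  // a generic ADD here and later fold into the addressing mode of the
  // selected load instead of being emitted as separate instructions.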
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectAddcSubb(N);
    return;
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
          ReplaceNode(N, Packed);
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID =
        SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version
    // which has a separate operand for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move
    // to the scalar version if the offsets are constant, so that we can try
    // to keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
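    // For example, a BFE of 8 bits starting at bit 8 selects to S_BFE_U32
    // with the packed operand (8 << 16) | 8 = 0x80008.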
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    SelectINTRINSIC_WO_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_VOID: {
    SelectINTRINSIC_VOID(N);
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
                                          SDValue &N0, SDValue &N1) {
  if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
      Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    // As we split 64-bit `or` earlier, it's a complicated pattern to match,
    // i.e.
    // (i64 (bitcast (v2i32 (build_vector
    //                        (or (extract_vector_elt V, 0), OFFSET),
    //                        (extract_vector_elt V, 1)))))
    SDValue Lo = Addr.getOperand(0).getOperand(0);
    if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
      SDValue BaseLo = Lo.getOperand(0);
      SDValue BaseHi = Addr.getOperand(0).getOperand(1);
      // Check that split base (Lo and Hi) are extracted from the same one.
      if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
          // Lo is statically extracted from index 0.
          isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
          BaseLo.getConstantOperandVal(1) == 0 &&
          // Hi is statically extracted from index 1.
          isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
          BaseHi.getConstantOperandVal(1) == 1) {
        N0 = BaseLo.getOperand(0).getOperand(0);
        N1 = Lo.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                                    SDValue &RHS) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    LHS = Addr.getOperand(0);
    RHS = Addr.getOperand(1);
    return true;
  }

  if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, LHS, RHS)) {
    assert(LHS && RHS && isa<ConstantSDNode>(RHS));
    return true;
  }

  return false;
}

StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
                                                       const SDLoc &DL) const {
  SDNode *Mov = CurDAG->getMachineNode(
    AMDGPU::S_MOV_B32, DL, MVT::i32,
    CurDAG->getTargetConstant(Val, DL, MVT::i32));
  return SDValue(Mov, 0);
}

// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};
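  // OpcMap is indexed as [consumes carry-in][is divergent][is add]: the low
  // half uses the plain add/sub (carry-out only) and the high half the
  // carry-consuming form; uniform operations select the SALU pair
  // (e.g. S_ADD_U32 + S_ADDC_U32), divergent ones the VALU pair
  // (V_ADD_CO_U32 + V_ADDC_U32).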
  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}

void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue CI = N->getOperand(2);

  if (N->isDivergent()) {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
                                                   : AMDGPU::V_SUBB_U32_e64;
    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {LHS, RHS, CI,
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
                                                   : AMDGPU::S_SUB_CO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
  }
}

void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
  // carry out despite the _i32 name. These were renamed in VI to _U32.
  // FIXME: We should probably rename the opcodes here.
  bool IsAdd = N->getOpcode() == ISD::UADDO;
  bool IsVALU = N->isDivergent();

  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
       ++UI)
    if (UI.getUse().getResNo() == 1) {
      if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
          (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
        IsVALU = true;
        break;
      }
    }

  if (IsVALU) {
    unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;

    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {N->getOperand(0), N->getOperand(1),
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
                                                : AMDGPU::S_USUBO_PSEUDO;

    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1)});
  }
}

void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32_e64, N->getVTList(), Ops);
}

void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  // src0_modifiers, src0, src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
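// (V_DIV_SCALE writes both a VGPR result and the VCC condition output.)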
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64_e64
                                  : AMDGPU::V_DIV_SCALE_F32_e64;

  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[8];
  SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;

  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                    Clamp };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (!Base || Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue())) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isDSOffsetLegal(SDValue(), ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));

          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.
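    // For example, a load from constant DS address 64 becomes a zeroed VGPR
    // base with offset 64; a neighboring access at address 68 can reuse the
    // same base register and later merge with it into a read2/write2.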
    SDLoc DL(Addr);

    if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
                                          unsigned Offset1,
                                          unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;

  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (!Base || Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

// TODO: If offset is too big, put low 16-bit into offset.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
}

bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base,
                                                    SDValue &Offset0,
                                                    SDValue &Offset1) const {
  return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
}
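
// The read2/write2 offset fields are encoded in units of the element size:
// e.g. a ds_read2_b32 covering byte offsets 8 and 12 uses offset0=2 and
// offset1=3.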
bool AMDGPUDAGToDAGISel::SelectDSReadWrite2(SDValue Addr, SDValue &Base,
                                            SDValue &Offset0, SDValue &Offset1,
                                            unsigned Size) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned OffsetValue0 = C1->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;

    // (add n0, c0)
    if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned OffsetValue0 = C->getZExtValue();
      unsigned OffsetValue1 = OffsetValue0 + Size;

      if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub = CurDAG->getMachineNode(
              SubOp, DL, MVT::getIntegerVT(Size * 8), Opnds);

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned OffsetValue0 = CAddr->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;

    if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero =
          CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
      return true;
    }
  }

  // default case

  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr, SDValue &VAddr,
                                     SDValue &SOffset, SDValue &Offset,
                                     SDValue &Offen, SDValue &Idxen,
                                     SDValue &Addr64) const {
  // Subtarget prefers to use flat instructions.
  // FIXME: This should be a pattern predicate and not reach here
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  ConstantSDNode *C1 = nullptr;
  SDValue N0 = Addr;
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    C1 = cast<ConstantSDNode>(Addr.getOperand(1));
    if (isUInt<32>(C1->getZExtValue()))
      N0 = Addr.getOperand(0);
    else
      C1 = nullptr;
  }

  if (N0.getOpcode() == ISD::ADD) {
    // (add N2, N3) -> addr64, or
    // (add (add N2, N3), C1) -> addr64
    SDValue N2 = N0.getOperand(0);
    SDValue N3 = N0.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);

    if (N2->isDivergent()) {
      if (N3->isDivergent()) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the resource from a 0 address.
        Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
        VAddr = N0;
      } else {
        // N2 is divergent, N3 is not.
        Ptr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      Ptr = N2;
      VAddr = N3;
    }
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  } else if (N0->isDivergent()) {
    // N0 is divergent. Use it as the addr64, and construct the resource from a
    // 0 address.
    Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
    VAddr = N0;
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
    Ptr = N0;
  }

  if (!C1) {
    // No offset.
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
    // Legal offset for instruction.
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
    return true;
  }

  // Illegal offset, store it in soffset.
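  // (An offset too wide for the MUBUF immediate field is materialized into
  // the SGPR soffset operand with an S_MOV_B32 instead.)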
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  SOffset =
      SDValue(CurDAG->getMachineNode(
                  AMDGPU::S_MOV_B32, DL, MVT::i32,
                  CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
              0);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  // addr64 bit was removed for volcanic islands.
  // FIXME: This should be a pattern predicate and not reach here
  if (!Subtarget->hasAddr64())
    return false;

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
    return false;

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

std::pair<SDValue, SDValue>
AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
  SDLoc DL(N);

  auto *FI = dyn_cast<FrameIndexSDNode>(N);
  SDValue TFI =
      FI ? CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0)) : N;

  // We rebase the base address into an absolute stack address and hence
  // use constant 0 for soffset. This value must be retained until
  // frame elimination and eliminateFrameIndex will choose the appropriate
  // frame register if need be.
  return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
                                                 SDValue Addr, SDValue &Rsrc,
                                                 SDValue &VAddr, SDValue &SOffset,
                                                 SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    int64_t Imm = CAddr->getSExtValue();
    const int64_t NullPtr =
        AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
    // Don't fold null pointer.
    if (Imm != NullPtr) {
      SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
      MachineSDNode *MovHighBits = CurDAG->getMachineNode(
        AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
      VAddr = SDValue(MovHighBits, 0);

      SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
      ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
      return true;
    }
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    // (add n0, c1)

    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive if range checking is enabled.
    //
    // The total computation of vaddr + soffset + offset must not overflow. If
    // vaddr is negative, even if offset is 0 the sgpr offset add will end up
    // overflowing.
    //
    // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
    // always perform a range check. If a negative vaddr base index was used,
    // this would fail the range check. The overall address computation would
    // compute a valid address, but this doesn't happen due to the range
    // check. For out-of-bounds MUBUF loads, a 0 is returned.
    //
    // Therefore it should be safe to fold any VGPR offset on gfx9 into the
    // MUBUF vaddr, but not on older subtargets which can only do this if the
    // sign bit is known 0.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
        (!Subtarget->privateMemoryResourceIsRangeChecked() ||
         CurDAG->SignBitIsZero(N0))) {
      std::tie(VAddr, SOffset) = foldFrameIndex(N0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}

static bool IsCopyFromSGPR(const SIRegisterInfo &TRI, SDValue Val) {
  if (Val.getOpcode() != ISD::CopyFromReg)
    return false;
  auto RC =
      TRI.getPhysRegClass(cast<RegisterSDNode>(Val.getOperand(1))->getReg());
  return RC && TRI.isSGPRClass(RC);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
                                                  SDValue Addr,
                                                  SDValue &SRsrc,
                                                  SDValue &SOffset,
                                                  SDValue &Offset) const {
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  SDLoc DL(Addr);

  // CopyFromReg <sgpr>
  if (IsCopyFromSGPR(*TRI, Addr)) {
    SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
    SOffset = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  ConstantSDNode *CAddr;
  if (Addr.getOpcode() == ISD::ADD) {
    // Add (CopyFromReg <sgpr>) <constant>
    CAddr = dyn_cast<ConstantSDNode>(Addr.getOperand(1));
    if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
      return false;
    if (!IsCopyFromSGPR(*TRI, Addr.getOperand(0)))
      return false;

    SOffset = Addr.getOperand(0);
  } else if ((CAddr = dyn_cast<ConstantSDNode>(Addr)) &&
             SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue())) {
    // <constant>
    SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  } else {
    return false;
  }

  SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);

  Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset,
                                           SDValue &Offset) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64))
    return false;

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

// Find a load or store from corresponding pattern root.
// Roots may be build_vector, bitconvert or their combinations.
static MemSDNode* findMemSDNode(SDNode *N) {
  N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
  if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
    return MN;
  assert(isa<BuildVectorSDNode>(N));
  for (SDValue V : N->op_values())
    if (MemSDNode *MN =
          dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
      return MN;
  llvm_unreachable("cannot find MemSDNode in the pattern!");
}

bool AMDGPUDAGToDAGISel::SelectFlatOffsetImpl(SDNode *N, SDValue Addr,
                                              SDValue &VAddr, SDValue &Offset,
                                              uint64_t FlatVariant) const {
  int64_t OffsetVal = 0;

  unsigned AS = findMemSDNode(N)->getAddressSpace();

  bool CanHaveFlatSegmentOffsetBug =
      Subtarget->hasFlatSegmentOffsetBug() &&
      FlatVariant == SIInstrFlags::FLAT &&
      (AS == AMDGPUAS::FLAT_ADDRESS || AS == AMDGPUAS::GLOBAL_ADDRESS);

  if (Subtarget->hasFlatInstOffsets() && !CanHaveFlatSegmentOffsetBug) {
    SDValue N0, N1;
    if (isBaseWithConstantOffset64(Addr, N0, N1)) {
      int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();

      const SIInstrInfo *TII = Subtarget->getInstrInfo();
      if (TII->isLegalFLATOffset(COffsetVal, AS, FlatVariant)) {
        Addr = N0;
        OffsetVal = COffsetVal;
      } else {
        // If the offset doesn't fit, put the low bits into the offset field
        // and add the rest.
        //
        // For a FLAT instruction the hardware decides whether to access
        // global/scratch/shared memory based on the high bits of vaddr,
        // ignoring the offset field, so we have to ensure that when we add
        // remainder to vaddr it still points into the same underlying object.
        // The easiest way to do that is to make sure that we split the offset
        // into two pieces that are both >= 0 or both <= 0.

        SDLoc DL(N);
        uint64_t RemainderOffset;

        std::tie(OffsetVal, RemainderOffset) =
            TII->splitFlatOffset(COffsetVal, AS, FlatVariant);

        SDValue AddOffsetLo =
            getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
        SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);

        if (Addr.getValueType().getSizeInBits() == 32) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(N0);
          Opnds.push_back(AddOffsetLo);
          unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            AddOp = AMDGPU::V_ADD_U32_e64;
            Opnds.push_back(Clamp);
          }
          Addr = SDValue(CurDAG->getMachineNode(AddOp, DL, MVT::i32, Opnds), 0);
        } else {
          // TODO: Should this try to use a scalar add pseudo if the base
          // address is uniform and saddr is usable?
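          // The 64-bit remainder add is expanded as a lo/hi pair:
          // V_ADD_CO_U32 on the low halves produces a carry that V_ADDC_U32
          // consumes for the high halves, and the halves are recombined with
          // a REG_SEQUENCE.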
          SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
          SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

          SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                                DL, MVT::i32, N0, Sub0);
          SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                                DL, MVT::i32, N0, Sub1);

          SDValue AddOffsetHi =
              getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);

          SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);

          SDNode *Add =
              CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
                                     {AddOffsetLo, SDValue(N0Lo, 0), Clamp});

          SDNode *Addc = CurDAG->getMachineNode(
              AMDGPU::V_ADDC_U32_e64, DL, VTs,
              {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});

          SDValue RegSequenceArgs[] = {
              CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL,
                                        MVT::i32),
              SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};

          Addr = SDValue(
              CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::i64,
                                     RegSequenceArgs),
              0);
        }
      }
    }
  }

  VAddr = Addr;
  Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N, SDValue Addr,
                                          SDValue &VAddr,
                                          SDValue &Offset) const {
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FLAT);
}

bool AMDGPUDAGToDAGISel::SelectGlobalOffset(SDNode *N, SDValue Addr,
                                            SDValue &VAddr,
                                            SDValue &Offset) const {
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset, SIInstrFlags::FlatGlobal);
}

bool AMDGPUDAGToDAGISel::SelectScratchOffset(SDNode *N, SDValue Addr,
                                             SDValue &VAddr,
                                             SDValue &Offset) const {
  return SelectFlatOffsetImpl(N, Addr, VAddr, Offset,
                              SIInstrFlags::FlatScratch);
}

// If this matches zero_extend i32:x, return x
static SDValue matchZExtFromI32(SDValue Op) {
  if (Op.getOpcode() != ISD::ZERO_EXTEND)
    return SDValue();

  SDValue ExtSrc = Op.getOperand(0);
  return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue();
}

// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
                                           SDValue Addr,
                                           SDValue &SAddr,
                                           SDValue &VOffset,
                                           SDValue &Offset) const {
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.

  SDValue LHS, RHS;
  if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
    int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
    const SIInstrInfo *TII = Subtarget->getInstrInfo();

    if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS,
                               SIInstrFlags::FlatGlobal)) {
      Addr = LHS;
      ImmOffset = COffsetVal;
    } else if (!LHS->isDivergent()) {
      if (COffsetVal > 0) {
        SDLoc SL(N);
        // saddr + large_offset -> saddr +
        //                         (voffset = large_offset & ~MaxOffset) +
        //                         (large_offset & MaxOffset);
        int64_t SplitImmOffset, RemainderOffset;
        std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
            COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);

        if (isUInt<32>(RemainderOffset)) {
          SDNode *VMov = CurDAG->getMachineNode(
              AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
              CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
          VOffset = SDValue(VMov, 0);
          SAddr = LHS;
          Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(),
                                             MVT::i16);
          return true;
        }
      }

      // We are adding a 64 bit SGPR and a constant. If constant bus limit
      // is 1 we would need to perform 1 or 2 extra moves for each half of
      // the constant and it is better to do a scalar add and then issue a
      // single VALU instruction to materialize zero. Otherwise it is less
      // instructions to perform VALU adds with immediates or inline literals.
      unsigned NumLiterals =
          !TII->isInlineConstant(APInt(32, COffsetVal & 0xffffffff)) +
          !TII->isInlineConstant(APInt(32, COffsetVal >> 32));
      if (Subtarget->getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
        return false;
    }
  }

  // Match the variable offset.
  if (Addr.getOpcode() == ISD::ADD) {
    LHS = Addr.getOperand(0);
    RHS = Addr.getOperand(1);

    if (!LHS->isDivergent()) {
      // add (i64 sgpr), (zero_extend (i32 vgpr))
      if (SDValue ZextRHS = matchZExtFromI32(RHS)) {
        SAddr = LHS;
        VOffset = ZextRHS;
      }
    }

    if (!SAddr && !RHS->isDivergent()) {
      // add (zero_extend (i32 vgpr)), (i64 sgpr)
      if (SDValue ZextLHS = matchZExtFromI32(LHS)) {
        SAddr = RHS;
        VOffset = ZextLHS;
      }
    }

    if (SAddr) {
      Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
      return true;
    }
  }

  if (Addr->isDivergent() || Addr.getOpcode() == ISD::UNDEF ||
      isa<ConstantSDNode>(Addr))
    return false;

  // It's cheaper to materialize a single 32-bit zero for vaddr than the two
  // moves required to copy a 64-bit SGPR to VGPR.
  SAddr = Addr;
  SDNode *VMov =
      CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, SDLoc(Addr), MVT::i32,
                             CurDAG->getTargetConstant(0, SDLoc(), MVT::i32));
  VOffset = SDValue(VMov, 0);
  Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
  return true;
}

static SDValue SelectSAddrFI(SelectionDAG *CurDAG, SDValue SAddr) {
  if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
    SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
  } else if (SAddr.getOpcode() == ISD::ADD &&
             isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
    // Materialize this into a scalar move for scalar address to avoid
    // readfirstlane.
    auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
    SDValue TFI =
        CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
    SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, SDLoc(SAddr),
                                           MVT::i32, TFI,
                                           SAddr.getOperand(1)),
                    0);
  }

  return SAddr;
}

// Match (32-bit SGPR base) + sext(imm offset)
bool AMDGPUDAGToDAGISel::SelectScratchSAddr(SDNode *Parent, SDValue Addr,
                                            SDValue &SAddr,
                                            SDValue &Offset) const {
  if (Addr->isDivergent())
    return false;

  SDLoc DL(Addr);

  int64_t COffsetVal = 0;

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    SAddr = Addr.getOperand(0);
  } else {
    SAddr = Addr;
  }

  SAddr = SelectSAddrFI(CurDAG, SAddr);

  const SIInstrInfo *TII = Subtarget->getInstrInfo();

  if (!TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS,
                              SIInstrFlags::FlatScratch)) {
    int64_t SplitImmOffset, RemainderOffset;
    std::tie(SplitImmOffset, RemainderOffset) = TII->splitFlatOffset(
        COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, SIInstrFlags::FlatScratch);

    COffsetVal = SplitImmOffset;

    SDValue AddOffset =
        SAddr.getOpcode() == ISD::TargetFrameIndex
            ? getMaterializedScalarImm32(Lo_32(RemainderOffset), DL)
            : CurDAG->getTargetConstant(RemainderOffset, DL, MVT::i32);
    SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_I32, DL, MVT::i32,
                                           SAddr, AddOffset),
                    0);
  }

  Offset = CurDAG->getTargetConstant(COffsetVal, DL, MVT::i16);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          SDValue &Offset, bool &Imm) const {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
  if (!C) {
    if (ByteOffsetNode.getValueType().isScalarInteger() &&
        ByteOffsetNode.getValueType().getSizeInBits() == 32) {
      Offset = ByteOffsetNode;
      Imm = false;
      return true;
    }
    if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
      if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
        Offset = ByteOffsetNode.getOperand(0);
        Imm = false;
        return true;
      }
    }
    return false;
  }

  SDLoc SL(ByteOffsetNode);
  // GFX9 and GFX10 have signed byte immediate offsets.
  int64_t ByteOffset = C->getSExtValue();
  Optional<int64_t> EncodedOffset =
      AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
  if (EncodedOffset) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    Imm = true;
    return true;
  }

  // SGPR and literal offsets are unsigned.
  if (ByteOffset < 0)
    return false;

  EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
  if (EncodedOffset) {
    Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
    return true;
  }

  if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
    return false;

  SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
  Offset =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit),
              0);
  return true;
}

SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
  if (Addr.getValueType() != MVT::i32)
    return Addr;

  // Zero-extend a 32-bit address.
  SDLoc SL(Addr);

  const MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned AddrHiVal = Info->get32BitAddressHighBits();
  SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);

  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
      Addr,
      CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
              0),
      CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
  };

  return SDValue(
      CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64, Ops), 0);
}

bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
                                    SDValue &Offset, bool &Imm) const {
  SDLoc SL(Addr);

  // A 32-bit (address + offset) should not cause unsigned 32-bit integer
  // wraparound, because s_load instructions perform the addition in 64 bits.
  if ((Addr.getValueType() != MVT::i32 ||
       Addr->getFlags().hasNoUnsignedWrap())) {
    SDValue N0, N1;
    // Extract the base and offset if possible.
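    // Illustrative sketch (assumption, not from the original source): for an
    // address like (add %sgpr_base, 40), N0 becomes %sgpr_base and N1 the
    // constant 40; SelectSMRDOffset then encodes 40 into the immediate field
    // (e.g. as a dword count on targets whose SMRD offset is in dwords).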
    if (CurDAG->isBaseWithConstantOffset(Addr) ||
        Addr.getOpcode() == ISD::ADD) {
      N0 = Addr.getOperand(0);
      N1 = Addr.getOperand(1);
    } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
      assert(N0 && N1 && isa<ConstantSDNode>(N1));
    }
    if (N0 && N1) {
      if (SelectSMRDOffset(N1, Offset, Imm)) {
        SBase = Expand32BitAddress(N0);
        return true;
      }
    }
  }
  SBase = Expand32BitAddress(Addr);
  Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
  Imm = true;
  return true;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
                                       SDValue &Offset) const {
  bool Imm = false;
  return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
                                         SDValue &Offset) const {
  assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  bool Imm = false;
  if (!SelectSMRD(Addr, SBase, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
                                        SDValue &Offset) const {
  bool Imm = false;
  return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
                                             SDValue &Offset) const {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
    // The immediate offset for S_BUFFER instructions is unsigned.
    if (auto Imm =
            AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
      Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
      return true;
    }
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
                                               SDValue &Offset) const {
  assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
    if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
                                                         C->getZExtValue())) {
      Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
      return true;
    }
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index, SDValue &Base,
                                            SDValue &Offset) const {
  SDLoc DL(Index);

  if (CurDAG->isBaseWithConstantOffset(Index)) {
    SDValue N0 = Index.getOperand(0);
    SDValue N1 = Index.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // (add n0, c0)
    // Don't peel off the offset (c0) if doing so could possibly lead
    // the base (n0) to be negative.
    // (or n0, |c0|) can never change a sign given isBaseWithConstantOffset.
    if (C1->getSExtValue() <= 0 || CurDAG->SignBitIsZero(N0) ||
        (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
      return true;
    }
  }

  if (isa<ConstantSDNode>(Index))
    return false;

  Base = Index;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}

SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
                                     SDValue Val, uint32_t Offset,
                                     uint32_t Width) {
  // Transformation function, pack the offset and width of a BFE into
  // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
  // source, bits [5:0] contain the offset and bits [22:16] the width.
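  // Worked example (not from the original source): extracting 8 bits
  // starting at bit 4 gives Offset = 4 and Width = 8, so PackedVal below is
  // 4 | (8 << 16) = 0x00080004.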
  uint32_t PackedVal = Offset | (Width << 16);
  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);

  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}

void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "((a << b) srl c)" ---> "BFE_U32 a, (c - b), (32 - c)"
  // "((a << b) sra c)" ---> "BFE_I32 a, (c - b), (32 - c)"
  // Predicate: 0 < b <= c < 32
  const SDValue &Shl = N->getOperand(0);
  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));

  if (B && C) {
    uint32_t BVal = B->getZExtValue();
    uint32_t CVal = C->getZExtValue();

    if (0 < BVal && BVal <= CVal && CVal < 32) {
      bool Signed = N->getOpcode() == ISD::SRA;
      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
      ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0),
                              CVal - BVal, 32 - CVal));
      return;
    }
  }
  SelectCode(N);
}

void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);
          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  Srl.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);
          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  And.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;

  case ISD::SIGN_EXTEND_INREG: {
    // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
    SDValue Src = N->getOperand(0);
    if (Src.getOpcode() != ISD::SRL)
      break;

    const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
    if (!Amt)
      break;

    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
                            Amt->getZExtValue(), Width));
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
  assert(N->getOpcode() == ISD::BRCOND);
  if (!N->hasOneUse())
    return false;

  SDValue Cond = N->getOperand(1);
  if (Cond.getOpcode() == ISD::CopyToReg)
    Cond = Cond.getOperand(2);

  if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
    return false;

  MVT VT = Cond.getOperand(0).getSimpleValueType();
  if (VT == MVT::i32)
    return true;

  if (VT == MVT::i64) {
    auto ST = static_cast<const GCNSubtarget *>(Subtarget);

    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    return (CC == ISD::SETEQ || CC == ISD::SETNE) &&
           ST->hasScalarCompareEq64();
  }

  return false;
}

void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  SDValue Cond = N->getOperand(1);

  if (Cond.isUndef()) {
    CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
                         N->getOperand(2), N->getOperand(0));
    return;
  }

  const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
  const SIRegisterInfo *TRI = ST->getRegisterInfo();

  bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
  unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
  Register CondReg = UseSCCBr ? (Register)AMDGPU::SCC : TRI->getVCC();
  SDLoc SL(N);

  if (!UseSCCBr) {
    // This is the case that we are selecting to S_CBRANCH_VCCNZ. We have not
    // analyzed what generates the vcc value, so we do not know whether vcc
    // bits for disabled lanes are 0. Thus we need to mask out bits for
    // disabled lanes.
    //
    // For the case that we select S_CBRANCH_SCC1 and it gets changed to
    // S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
    // SIInstrInfo::moveToVALU which inserts the S_AND.
    //
    // We could add an analysis of what generates the vcc value here and omit
    // the S_AND when it is unnecessary. But it would be better to add a
    // separate pass after SIFixSGPRCopies to do the unnecessary S_AND
    // removal, so it catches both cases.
    Cond = SDValue(
        CurDAG->getMachineNode(
            ST->isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64, SL,
            MVT::i1,
            CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
                                               : AMDGPU::EXEC,
                                MVT::i1),
            Cond),
        0);
  }

  SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
  CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
                       N->getOperand(2), // Basic Block
                       VCC.getValue(0));
}

void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
  MVT VT = N->getSimpleValueType(0);
  bool IsFMA = N->getOpcode() == ISD::FMA;
  if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
                         !Subtarget->hasFmaMixInsts()) ||
      ((IsFMA && Subtarget->hasMadMixInsts()) ||
       (!IsFMA && Subtarget->hasFmaMixInsts()))) {
    SelectCode(N);
    return;
  }

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);
  unsigned Src0Mods, Src1Mods, Src2Mods;

  // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an
  // operand using the conversion from f16.
  bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
  bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
  bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);

  assert((IsFMA || !Mode.allFP32Denormals()) &&
         "fmad selected with denormals enabled");
  // TODO: We can select this with f32 denormals enabled if all the sources
  // are converted from f16 (in which case fmad isn't legal).

  if (Sel0 || Sel1 || Sel2) {
    // For dummy operands.
    SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
    SDValue Ops[] = {
        CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
        CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
        CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
        CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
        Zero, Zero};

    CurDAG->SelectNodeTo(N,
                         IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
                         MVT::f32, Ops);
  } else {
    SelectCode(N);
  }
}

// This is here because there isn't a way to use the generated sub0_sub1 as the
// subreg index to EXTRACT_SUBREG in tablegen.
void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
  MemSDNode *Mem = cast<MemSDNode>(N);
  unsigned AS = Mem->getAddressSpace();
  if (AS == AMDGPUAS::FLAT_ADDRESS) {
    SelectCode(N);
    return;
  }

  MVT VT = N->getSimpleValueType(0);
  bool Is32 = (VT == MVT::i32);
  SDLoc SL(N);

  MachineSDNode *CmpSwap = nullptr;
  if (Subtarget->hasAddr64()) {
    SDValue SRsrc, VAddr, SOffset, Offset;

    if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset)) {
      unsigned Opcode = Is32
                            ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN
                            : AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
      SDValue CmpVal = Mem->getOperand(2);
      SDValue CPol = CurDAG->getTargetConstant(AMDGPU::CPol::GLC, SL,
                                               MVT::i32);

      // XXX - Do we care about glue operands?

      SDValue Ops[] = {CmpVal, VAddr, SRsrc, SOffset, Offset, CPol,
                       Mem->getChain()};

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SDValue SRsrc, SOffset, Offset;
    if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN
                             : AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;

      SDValue CmpVal = Mem->getOperand(2);
      SDValue CPol = CurDAG->getTargetConstant(AMDGPU::CPol::GLC, SL,
                                               MVT::i32);
      SDValue Ops[] = {CmpVal, SRsrc, SOffset, Offset, CPol, Mem->getChain()};

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SelectCode(N);
    return;
  }

  MachineMemOperand *MMO = Mem->getMemOperand();
  CurDAG->setNodeMemRefs(CmpSwap, {MMO});

  unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  SDValue Extract =
      CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));

  ReplaceUses(SDValue(N, 0), Extract);
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
  CurDAG->RemoveDeadNode(N);
}

void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
  // The address is assumed to be uniform, so if it ends up in a VGPR, it
  // will be copied to an SGPR with readfirstlane.
  unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ? AMDGPU::DS_APPEND
                                                       : AMDGPU::DS_CONSUME;

  SDValue Chain = N->getOperand(0);
  SDValue Ptr = N->getOperand(2);
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  MachineMemOperand *MMO = M->getMemOperand();
  bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  SDValue Offset;
  if (CurDAG->isBaseWithConstantOffset(Ptr)) {
    SDValue PtrBase = Ptr.getOperand(0);
    SDValue PtrOffset = Ptr.getOperand(1);

    const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
    if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
      N = glueCopyToM0(N, PtrBase);
      Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
    }
  }

  if (!Offset) {
    N = glueCopyToM0(N, Ptr);
    Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
  }

  SDValue Ops[] = {
      Offset,
      CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
      Chain,
      N->getOperand(N->getNumOperands() - 1) // New glue
  };

  SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
  if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !Subtarget->hasGWSSemaReleaseAll()) {
    // Let this error.
    SelectCode(N);
    return;
  }

  // Chain, intrinsic ID, vsrc, offset
  const bool HasVSrc = N->getNumOperands() == 4;
  assert(HasVSrc || N->getNumOperands() == 3);

  SDLoc SL(N);
  SDValue BaseOffset =
      N->getOperand(HasVSrc ? 3 : 2);

  int ImmOffset = 0;
  MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
  MachineMemOperand *MMO = M->getMemOperand();

  // Don't worry if the offset ends up in a VGPR. Only one lane will have an
  // effect, so SIFixSGPRCopies will validly insert readfirstlane.

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only sets the low 16-bits, we could leave it as-is and add
    // 1 to the immediate offset.
    glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
    ImmOffset = ConstOffset->getZExtValue();
  } else {
    if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
      ImmOffset = BaseOffset.getConstantOperandVal(1);
      BaseOffset = BaseOffset.getOperand(0);
    }

    // Prefer to do the shift in an SGPR since it should be possible to use
    // m0 as the result directly. If it's already an SGPR, it will be
    // eliminated later.
    SDNode *SGPROffset =
        CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
                               BaseOffset);
    // Shift to offset in m0
    SDNode *M0Base =
        CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
                               SDValue(SGPROffset, 0),
                               CurDAG->getTargetConstant(16, SL, MVT::i32));
    glueCopyToM0(N, SDValue(M0Base, 0));
  }

  SDValue Chain = N->getOperand(0);
  SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);

  const unsigned Opc = gwsIntrinToOpcode(IntrID);
  SmallVector<SDValue, 5> Ops;
  if (HasVSrc)
    Ops.push_back(N->getOperand(2));
  Ops.push_back(OffsetField);
  Ops.push_back(Chain);

  SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
  CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
}

void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
  if (Subtarget->getLDSBankCount() != 16) {
    // This is a single instruction with a pattern.
    SelectCode(N);
    return;
  }

  SDLoc DL(N);

  // This requires 2 instructions. It is possible to write a pattern to
  // support this, but the generated isel emitter doesn't correctly deal with
  // multiple output instructions using the same physical register input. The
  // copy to m0 is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.
  //
  // def : Pat <
  //   (int_amdgcn_interp_p1_f16
  //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
  //                             (i32 timm:$attrchan), (i32 timm:$attr),
  //                             (i1 timm:$high), M0),
  //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
  //       timm:$attrchan, 0,
  //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
  //   let Predicates = [has16BankLDS];
  // }

  // 16 bank LDS
  SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
                                      N->getOperand(5), SDValue());

  SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);

  SDNode *InterpMov = CurDAG->getMachineNode(
      AMDGPU::V_INTERP_MOV_F32, DL, VTs,
      {
          CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
          N->getOperand(3),                           // Attr
          N->getOperand(2),                           // Attrchan
          ToM0.getValue(1)                            // In glue
      });

  SDNode *InterpP1LV = CurDAG->getMachineNode(
      AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32,
      {
          CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
          N->getOperand(1),                           // Src0
          N->getOperand(3),                           // Attr
          N->getOperand(2),                           // Attrchan
          CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
          SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
          N->getOperand(4),      // high
          CurDAG->getTargetConstant(0, DL, MVT::i1),  // $clamp
          CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
          SDValue(InterpMov, 1)
      });

  CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
}

void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_append:
  case Intrinsic::amdgcn_ds_consume: {
    if (N->getValueType(0) != MVT::i32)
      break;
    SelectDSAppendConsume(N, IntrID);
    return;
  }
  }

  SelectCode(N);
}

void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  unsigned Opcode;
  switch (IntrID) {
  case Intrinsic::amdgcn_wqm:
    Opcode = AMDGPU::WQM;
    break;
  case Intrinsic::amdgcn_softwqm:
    Opcode = AMDGPU::SOFT_WQM;
    break;
  case Intrinsic::amdgcn_wwm:
  case Intrinsic::amdgcn_strict_wwm:
    Opcode = AMDGPU::STRICT_WWM;
    break;
  case Intrinsic::amdgcn_strict_wqm:
    Opcode = AMDGPU::STRICT_WQM;
    break;
  case Intrinsic::amdgcn_interp_p1_f16:
    SelectInterpP1F16(N);
    return;
  default:
    SelectCode(N);
    return;
  }

  SDValue Src = N->getOperand(1);
  CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
}

void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
  unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    SelectDS_GWS(N, IntrID);
    return;
  default:
    break;
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
                                            unsigned &Mods,
                                            bool AllowAbs) const {
  Mods = 0;
  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (AllowAbs && Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods;
  if (SelectVOP3ModsImpl(In, Src, Mods)) {
    SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  unsigned Mods;
  if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
    SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In),
                                        MVT::i32);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
                                             SDValue &SrcMods) const {
  SelectVOP3Mods(In, Src, SrcMods);
  return isNoNanSrc(Src);
}

bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
  if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
    return false;

  Src = In;
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  SDLoc DL(In);
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
                                          SDValue &SrcMods, SDValue &Clamp,
                                          SDValue &Omod) const {
  SDLoc DL(In);
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);

  return SelectVOP3BMods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
                                         SDValue &Clamp,
                                         SDValue &Omod) const {
  Src = In;

  SDLoc DL(In);
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  unsigned Mods = 0;
  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::BUILD_VECTOR) {
    unsigned VecMods = Mods;

    SDValue Lo = stripBitcast(Src.getOperand(0));
    SDValue Hi = stripBitcast(Src.getOperand(1));

    if (Lo.getOpcode() == ISD::FNEG) {
      Lo = stripBitcast(Lo.getOperand(0));
      Mods ^= SISrcMods::NEG;
    }

    if (Hi.getOpcode() == ISD::FNEG) {
      Hi = stripBitcast(Hi.getOperand(0));
      Mods ^= SISrcMods::NEG_HI;
    }

    if (isExtractHiElt(Lo, Lo))
      Mods |= SISrcMods::OP_SEL_0;

    if (isExtractHiElt(Hi, Hi))
      Mods |= SISrcMods::OP_SEL_1;

    unsigned VecSize = Src.getValueSizeInBits();
    Lo = stripExtractLoElt(Lo);
    Hi = stripExtractLoElt(Hi);

    if (Lo.getValueSizeInBits() > VecSize) {
      Lo = CurDAG->getTargetExtractSubreg(
          (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
          MVT::getIntegerVT(VecSize), Lo);
    }

    if (Hi.getValueSizeInBits() > VecSize) {
      Hi = CurDAG->getTargetExtractSubreg(
          (VecSize > 32) ? AMDGPU::sub0_sub1 : AMDGPU::sub0, SDLoc(In),
          MVT::getIntegerVT(VecSize), Hi);
    }

    assert(Lo.getValueSizeInBits() <= VecSize &&
           Hi.getValueSizeInBits() <= VecSize);

    if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
      // Really a scalar input. Just select from the low half of the register
      // to avoid packing.

      if (VecSize == 32 || VecSize == Lo.getValueSizeInBits()) {
        Src = Lo;
      } else {
        assert(Lo.getValueSizeInBits() == 32 && VecSize == 64);

        SDLoc SL(In);
        SDValue Undef = SDValue(
            CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, SL,
                                   Lo.getValueType()),
            0);
        auto RC = Lo->isDivergent()
                      ? AMDGPU::VReg_64RegClassID
                      : AMDGPU::SReg_64RegClassID;
        const SDValue Ops[] = {
            CurDAG->getTargetConstant(RC, SL, MVT::i32),
            Lo, CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
            Undef, CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32)};

        Src = SDValue(CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SL,
                                             Src.getValueType(), Ops),
                      0);
      }

      SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
      return true;
    }

    if (VecSize == 64 && Lo == Hi && isa<ConstantFPSDNode>(Lo)) {
      uint64_t Lit = cast<ConstantFPSDNode>(Lo)->getValueAPF()
                         .bitcastToAPInt().getZExtValue();
      if (AMDGPU::isInlinableLiteral32(Lit, Subtarget->hasInv2PiInlineImm())) {
        Src = CurDAG->getTargetConstant(Lit, SDLoc(In), MVT::i64);
        SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
        return true;
      }
    }

    Mods = VecMods;
  }

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
                                         SDValue &SrcMods) const {
  Src = In;
  // FIXME: Handle op_sel
  SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
                                             SDValue &SrcMods) const {
  // FIXME: Handle op_sel
  return SelectVOP3Mods(In, Src, SrcMods);
}

// The return value is not whether the match is possible (which it always is),
// but whether or not a conversion is really used.
bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
                                                   unsigned &Mods) const {
  Mods = 0;
  SelectVOP3ModsImpl(In, Src, Mods);

  if (Src.getOpcode() == ISD::FP_EXTEND) {
    Src = Src.getOperand(0);
    assert(Src.getValueType() == MVT::f16);
    Src = stripBitcast(Src);

    // Be careful about folding modifiers if we already have an abs. fneg is
    // applied last, so we don't want to apply an earlier fneg.
    if ((Mods & SISrcMods::ABS) == 0) {
      unsigned ModsTmp;
      SelectVOP3ModsImpl(Src, Src, ModsTmp);

      if ((ModsTmp & SISrcMods::NEG) != 0)
        Mods ^= SISrcMods::NEG;

      if ((ModsTmp & SISrcMods::ABS) != 0)
        Mods |= SISrcMods::ABS;
    }

    // op_sel/op_sel_hi decide the source type and source.
    // If the source's op_sel_hi is set, it indicates to do a conversion from
    // fp16. If the source's op_sel is set, it picks the high half of the
    // source register.

    Mods |= SISrcMods::OP_SEL_1;
    if (isExtractHiElt(Src, Src)) {
      Mods |= SISrcMods::OP_SEL_0;

      // TODO: Should we try to look for neg/abs here?
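      // Illustrative note (assumption, not from the original source): with
      // this encoding, OP_SEL_1 alone means "convert f16 from the low 16
      // bits of the source register", while OP_SEL_0 | OP_SEL_1 means
      // "convert f16 from the high 16 bits". So a source like
      // (fp_extend (extract_hi v2f16:$v)) ends up selecting the high half.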
    }

    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
                                               SDValue &SrcMods) const {
  unsigned Mods = 0;
  SelectVOP3PMadMixModsImpl(In, Src, Mods);
  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
  return true;
}

SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
  if (In.isUndef())
    return CurDAG->getUNDEF(MVT::i32);

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
    SDLoc SL(In);
    return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
  }

  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
    SDLoc SL(In);
    return CurDAG->getConstant(
        C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
  }

  SDValue Src;
  if (isExtractHiElt(In, Src))
    return Src;

  return SDValue();
}

bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode *N) const {
  assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);

  const SIRegisterInfo *SIRI =
      static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  const SIInstrInfo *SII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  unsigned Limit = 0;
  bool AllUsesAcceptSReg = true;
  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
       Limit < 10 && U != E; ++U, ++Limit) {
    const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());

    // If the register class is unknown, it could be an unknown
    // register class that needs to be an SGPR, e.g. an inline asm
    // constraint
    if (!RC || SIRI->isSGPRClass(RC))
      return false;

    if (RC != &AMDGPU::VS_32RegClass) {
      AllUsesAcceptSReg = false;
      SDNode *User = *U;
      if (User->isMachineOpcode()) {
        unsigned Opc = User->getMachineOpcode();
        const MCInstrDesc &Desc = SII->get(Opc);
        if (Desc.isCommutable()) {
          unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
          unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
          if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
            unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
            const TargetRegisterClass *CommutedRC =
                getOperandRegClass(*U, CommutedOpNo);
            if (CommutedRC == &AMDGPU::VS_32RegClass)
              AllUsesAcceptSReg = true;
          }
        }
      }

      // If "AllUsesAcceptSReg == false" so far, we haven't succeeded in
      // commuting the current user. This means we have at least one use
      // that strictly requires a VGPR, so we will not attempt to commute
      // other user instructions.
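      // Illustrative note (assumption, not from the original source): for a
      // commutable VALU use where only one source operand has the VS_32
      // (SGPR-or-VGPR) class, the immediate can still count as acceptable
      // when findCommutedOpIndices shows it could be swapped into that
      // operand slot.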
      if (!AllUsesAcceptSReg)
        break;
    }
  }
  return !AllUsesAcceptSReg && (Limit < 10);
}

bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
  auto Ld = cast<LoadSDNode>(N);

  return Ld->getAlignment() >= 4 &&
         (((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
            Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
           !N->isDivergent()) ||
          (Subtarget->getScalarizeGlobalBehavior() &&
           Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
           Ld->isSimple() && !N->isDivergent() &&
           static_cast<const SITargetLowering *>(getTargetLowering())
               ->isMemOpHasNoClobberedMemOperand(N)));
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering *>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;

    // Go over all selected nodes and try to fold them a bit more
    SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
    while (Position != CurDAG->allnodes_end()) {
      SDNode *Node = &*Position++;
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        if (ResNode)
          ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}

bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<R600Subtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}

bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
  if (!N->readMem())
    return false;
  if (CbId == -1)
    return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
           N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;

  return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
}

bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                       SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr =
        CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr), true);
    return true;
  }
  return false;
}

bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                       SDValue &BaseReg,
                                                       SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
    return true;
  }
  return false;
}

void R600DAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return; // Already selected.
  }

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    unsigned RegClassID;
    // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
    // that adds a 128-bit reg copy when going through the
    // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
    // as possible because they can't be bundled by our scheduler.
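    // Illustrative sketch (assumption, not from the original source): a
    // 4-element BUILD_VECTOR expands to roughly
    //   %v = IMPLICIT_DEF
    //   %v = INSERT_SUBREG %v, %e0, sub0
    //   ...
    //   %v = INSERT_SUBREG %v, %e3, sub3
    // so choosing the right register class up front avoids a full 128-bit
    // copy later.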
    switch (NumVectorElts) {
    case 2:
      RegClassID = R600::R600_Reg64RegClassID;
      break;
    case 4:
      if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
        RegClassID = R600::R600_Reg128VerticalRegClassID;
      else
        RegClassID = R600::R600_Reg128RegClassID;
      break;
    default:
      llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
    }
    SelectBuildVector(N, RegClassID);
    return;
  }
  }

  SelectCode(N);
}

bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                          SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                          SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD &&
      (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) &&
      isInt<16>(IMMOffset->getZExtValue())) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) &&
             isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  R600::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  return true;
}
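// Illustrative note on SelectADDRVTX_READ (assumption, not from the original
// source): an address like (add %ptr, 16) folds to Base = %ptr, Offset = 16,
// and a bare constant address 16 folds to Base = R600::ZERO, Offset = 16,
// since both offsets fit the signed 16-bit VTX offset field.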