
Conversation

@AZero13
Contributor

@AZero13 AZero13 commented Oct 13, 2025

Map ARMISD::CMN to tCMN instead of ARMcmpZ.

Rename the cmn instructions to match the new node: tCMNz becomes tCMN, t2CMNzrr becomes t2CMNrr, and t2CMNzrs becomes t2CMNrs.

@llvmbot
Member

llvmbot commented Oct 13, 2025

@llvm/pr-subscribers-backend-arm

Author: AZero13 (AZero13)

Changes

Map ARMISD::CMN to tCMN instead of ARMcmpZ.

Also add the proper legality checks in ISel: equality compares can always use CMN; unsigned compares only when the negated operand is known to be non-zero; signed compares only when the negation cannot wrap (no-signed-wrap flag, or the operand is known not to be the minimum signed value).
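As a rough illustration (my own example, not taken from the PR or its tests), this is the kind of source pattern the new lowering targets: an equality compare against a negated value, which can now be selected as a single cmn instead of going through the old ARMcmpZ-with-ineg patterns.

```cpp
// Hypothetical example (not from the PR's test suite): comparing a value
// against the negation of another. Equality only reads the Z flag, and Z of
// "cmn a, b" (flags of a + b) is set exactly when a + b == 0, so equality
// predicates are always safe for CMN.
extern "C" bool isNegationOf(int a, int b) {
  return a == -b; // expected to lower to a single "cmn" plus a conditional set
}
```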


Full diff: https://github.com/llvm/llvm-project/pull/163223.diff

9 Files Affected:

  • (modified) llvm/lib/Target/ARM/ARMFeatures.h (+1-1)
  • (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+35)
  • (modified) llvm/lib/Target/ARM/ARMInstrInfo.td (+1-1)
  • (modified) llvm/lib/Target/ARM/ARMInstrThumb.td (+6-12)
  • (modified) llvm/lib/Target/ARM/ARMInstrThumb2.td (+16-7)
  • (modified) llvm/lib/Target/ARM/ARMLatencyMutations.cpp (+1-1)
  • (modified) llvm/lib/Target/ARM/ARMScheduleM55.td (+2-2)
  • (modified) llvm/lib/Target/ARM/Thumb2SizeReduction.cpp (+1-3)
  • (modified) llvm/test/MC/ARM/thumb-shift-encoding.s (+1-1)
```diff
diff --git a/llvm/lib/Target/ARM/ARMFeatures.h b/llvm/lib/Target/ARM/ARMFeatures.h
index 99e0ef05b5e21..eeb67abe27512 100644
--- a/llvm/lib/Target/ARM/ARMFeatures.h
+++ b/llvm/lib/Target/ARM/ARMFeatures.h
@@ -51,7 +51,7 @@ inline bool isV8EligibleForIT(const InstrType *Instr) {
     // Outside of an IT block, these set CPSR.
     return IsCPSRDead(Instr);
   case ARM::tADDrSPi:
-  case ARM::tCMNz:
+  case ARM::tCMN:
   case ARM::tCMPi8:
   case ARM::tCMPr:
   case ARM::tLDRBi:
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 67ea2dd3df792..8a5ae545252f9 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -4678,6 +4678,29 @@ static bool isFloatingPointZero(SDValue Op) {
   return false;
 }
 
+static bool isSafeSignedCMN(SDValue Op, SelectionDAG &DAG) {
+  // 0 - INT_MIN sign wraps, so no signed wrap means cmn is safe.
+  if (Op->getFlags().hasNoSignedWrap())
+    return true;
+
+  // We can still figure out if the second operand is safe to use
+  // in a CMN instruction by checking if it is known to be not the minimum
+  // signed value. If it is not, then we can safely use CMN.
+  // Note: We can eventually remove this check and simply rely on
+  // Op->getFlags().hasNoSignedWrap() once SelectionDAG/ISelLowering
+  // consistently sets them appropriately when making said nodes.
+
+  KnownBits KnownSrc = DAG.computeKnownBits(Op.getOperand(1));
+  return !KnownSrc.getSignedMinValue().isMinSignedValue();
+}
+
+static bool isCMN(SDValue Op, ISD::CondCode CC, SelectionDAG &DAG) {
+  return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
+         (isIntEqualitySetCC(CC) ||
+          (isUnsignedIntSetCC(CC) && DAG.isKnownNeverZero(Op.getOperand(1))) ||
+          (isSignedIntSetCC(CC) && isSafeSignedCMN(Op, DAG)));
+}
+
 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
 /// the given operands.
 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
@@ -4811,6 +4834,18 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
     CompareType = ARMISD::CMPZ;
     break;
   }
+
+  // TODO: Remove CMPZ check once we generalize and remove the CMPZ enum from
+  // the codebase.
+  if (CompareType != ARMISD::CMPZ && isCMN(RHS, CC, DAG)) {
+    CompareType = ARMISD::CMN;
+    RHS = RHS.getOperand(1);
+  } else if (CompareType != ARMISD::CMPZ && isCMN(LHS, CC, DAG)) {
+    CompareType = ARMISD::CMN;
+    LHS = LHS.getOperand(1);
+    CondCode = IntCCToARMCC(ISD::getSetCCSwappedOperands(CC));
+  }
+
   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
   return DAG.getNode(CompareType, dl, FlagsVT, LHS, RHS);
 }
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index 282ff534fc112..a3b8042b3c2f7 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -206,7 +206,7 @@ def ARMBcci64 : SDNode<"ARMISD::BCC_i64", SDT_ARMBCC_i64,
 
 def ARMcmp : SDNode<"ARMISD::CMP", SDT_ARMCmp>;
 
-def ARMcmn : SDNode<"ARMISD::CMN", SDT_ARMCmp>;
+def ARMcmn : SDNode<"ARMISD::CMN", SDT_ARMCmp, [SDNPCommutative]>;
 
 def ARMcmpZ : SDNode<"ARMISD::CMPZ", SDT_ARMCmp, [SDNPCommutative]>;
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb.td b/llvm/lib/Target/ARM/ARMInstrThumb.td
index 0c5ea3e0fa8d5..d62a2a0e16fa7 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -1101,21 +1101,12 @@ def tBIC : // A8.6.20
 
 // CMN register
 let isCompare = 1, Defs = [CPSR] in {
-//FIXME: Disable CMN, as CCodes are backwards from compare expectations
-// Compare-to-zero still works out, just not the relationals
-//def tCMN : // A8.6.33
-//  T1pIDPEncode<0b1011, (outs), (ins tGPR:$lhs, tGPR:$rhs),
-//               IIC_iCMPr,
-//               "cmn", "\t$lhs, $rhs",
-//               [(set CPSR, (ARMcmp tGPR:$lhs, (ineg tGPR:$rhs)))]>;
-
-def tCMNz : // A8.6.33
+def tCMN : // A8.6.33
   T1pIDPEncode<0b1011, (outs), (ins tGPR:$Rn, tGPR:$Rm),
                IIC_iCMPr,
                "cmn", "\t$Rn, $Rm",
-               [(set CPSR, (ARMcmpZ tGPR:$Rn, (ineg tGPR:$Rm)))]>,
-      Sched<[WriteCMP]>;
-
+               [(set CPSR, (ARMcmn tGPR:$Rn, tGPR:$Rm))]>,
+  Sched<[WriteCMP]>;
 } // isCompare = 1, Defs = [CPSR]
 
 // CMP immediate
@@ -1571,6 +1562,9 @@ def : T1Pat<(ARMcmpZ tGPR:$Rn, imm0_255:$imm8),
             (tCMPi8 tGPR:$Rn, imm0_255:$imm8)>;
 def : T1Pat<(ARMcmpZ tGPR:$Rn, tGPR:$Rm),
             (tCMPr tGPR:$Rn, tGPR:$Rm)>;
+// Fold compare-to-zero of a negated register into CMN register form.
+def : T1Pat<(ARMcmpZ tGPR:$Rn, (ineg tGPR:$Rm)),
+            (tCMN tGPR:$Rn, tGPR:$Rm)>;
 
 // Bswap 16 with load/store
 def : T1Pat<(srl (bswap (extloadi16 t_addrmode_is2:$addr)), (i32 16)),
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index c229c8e4491df..73ee7799019dd 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -3494,11 +3494,12 @@ let isCompare = 1, Defs = [CPSR] in {
     let Inst{15} = 0;
     let Inst{11-8} = 0b1111; // Rd
   }
-  // register
-  def t2CMNzrr : T2TwoRegCmp<
+
+  // register
+  def t2CMNrr : T2TwoRegCmp<
         (outs), (ins GPRnopc:$Rn, rGPR:$Rm), IIC_iCMPr,
         "cmn", ".w\t$Rn, $Rm",
-        [(set CPSR, (BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+        [(set CPSR, (BinOpFrag<(ARMcmn node:$LHS, node:$RHS)>
                      GPRnopc:$Rn, rGPR:$Rm))]>,
         Sched<[WriteCMP, ReadALU, ReadALU]> {
     let Inst{31-27} = 0b11101;
@@ -3511,10 +3512,10 @@ let isCompare = 1, Defs = [CPSR] in {
     let Inst{5-4} = 0b00; // type
   }
   // shifted register
-  def t2CMNzrs : T2OneRegCmpShiftedReg<
+  def t2CMNrs : T2OneRegCmpShiftedReg<
         (outs), (ins GPRnopc:$Rn, t2_so_reg:$ShiftedRm), IIC_iCMPsi,
         "cmn", ".w\t$Rn, $ShiftedRm",
-        [(set CPSR, (BinOpFrag<(ARMcmpZ node:$LHS,(ineg node:$RHS))>
+        [(set CPSR, (BinOpFrag<(ARMcmn node:$LHS, node:$RHS)>
                      GPRnopc:$Rn, t2_so_reg:$ShiftedRm))]>,
         Sched<[WriteCMPsi, ReadALU, ReadALU]> {
     let Inst{31-27} = 0b11101;
@@ -3531,7 +3532,7 @@ let isCompare = 1, Defs = [CPSR] in {
 def : t2InstAlias<"cmn${p} $Rn, $imm",
                   (t2CMNri GPRnopc:$Rn, t2_so_imm:$imm, pred:$p)>;
 def : t2InstAlias<"cmn${p} $Rn, $shift",
-                  (t2CMNzrs GPRnopc:$Rn, t2_so_reg:$shift, pred:$p)>;
+                  (t2CMNrs GPRnopc:$Rn, t2_so_reg:$shift, pred:$p)>;
 
 def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm),
             (t2CMNri GPR:$src, t2_so_imm_neg:$imm)>;
@@ -3539,6 +3540,14 @@ def : T2Pat<(ARMcmp GPR:$src, t2_so_imm_neg:$imm),
 def : T2Pat<(ARMcmpZ GPRnopc:$src, t2_so_imm_neg:$imm),
             (t2CMNri GPRnopc:$src, t2_so_imm_neg:$imm)>;
 
+// Fold compare-to-zero of a negated register into CMN register forms.
+// Note: This maps only the encoding; higher-level legality of relational use
+// is handled elsewhere.
+def : T2Pat<(ARMcmpZ GPRnopc:$Rn, (ineg rGPR:$Rm)),
+            (t2CMNrr GPRnopc:$Rn, rGPR:$Rm)>;
+def : T2Pat<(ARMcmpZ GPRnopc:$Rn, (ineg t2_so_reg:$ShiftedRm)),
+            (t2CMNrs GPRnopc:$Rn, t2_so_reg:$ShiftedRm)>;
+
 defm t2TST : T2I_cmp_irs<0b0000, "tst", rGPR,
                          IIC_iTSTi, IIC_iTSTr, IIC_iTSTsi,
                          BinOpFrag<(ARMcmpZ (and_su node:$LHS, node:$RHS), 0)>>;
@@ -5096,7 +5105,7 @@ def : t2InstAlias<"subw${p} $Rdn, $imm",
 
 // Alias for compares without the ".w" optional width specifier.
 def : t2InstAlias<"cmn${p} $Rn, $Rm",
-                  (t2CMNzrr GPRnopc:$Rn, rGPR:$Rm, pred:$p)>;
+                  (t2CMNrr GPRnopc:$Rn, rGPR:$Rm, pred:$p)>;
 def : t2InstAlias<"teq${p} $Rn, $Rm",
                   (t2TEQrr rGPR:$Rn, rGPR:$Rm, pred:$p)>;
 def : t2InstAlias<"tst${p} $Rn, $Rm",
diff --git a/llvm/lib/Target/ARM/ARMLatencyMutations.cpp b/llvm/lib/Target/ARM/ARMLatencyMutations.cpp
index 85bad4f1925a4..bd497f4172406 100644
--- a/llvm/lib/Target/ARM/ARMLatencyMutations.cpp
+++ b/llvm/lib/Target/ARM/ARMLatencyMutations.cpp
@@ -114,7 +114,7 @@ InstructionInformation::InstructionInformation(const ARMBaseInstrInfo *TII) {
   std::initializer_list<unsigned> isInlineShiftALUList = {
       t2ADCrs, t2ADDSrs, t2ADDrs, t2BICrs, t2EORrs,
      t2ORNrs, t2RSBSrs, t2RSBrs, t2SBCrs, t2SUBrs,
-      t2SUBSrs, t2CMPrs, t2CMNzrs, t2TEQrs, t2TSTrs,
+      t2SUBSrs, t2CMPrs, t2CMNrs, t2TEQrs, t2TSTrs,
   };
   for (auto op : isInlineShiftALUList) {
     Info[op].IsInlineShiftALU = true;
diff --git a/llvm/lib/Target/ARM/ARMScheduleM55.td b/llvm/lib/Target/ARM/ARMScheduleM55.td
index ff05936e8ba45..de55eafb039d6 100644
--- a/llvm/lib/Target/ARM/ARMScheduleM55.td
+++ b/llvm/lib/Target/ARM/ARMScheduleM55.td
@@ -152,7 +152,7 @@ def : InstRW<[M55WriteDX_SI], (instregex "t2CS(EL|INC|INV|NEG)")>;
 // Thumb 2 instructions that could be reduced to a thumb 1 instruction and can
 // be dual issued with one of the above. This list is optimistic.
 def : InstRW<[M55WriteDX_DI], (instregex "t2ADDC?rr$", "t2ADDrr$",
-    "t2ADDSrr$", "t2ANDrr$", "t2ASRr[ir]$", "t2BICrr$", "t2CMNzrr$",
+    "t2ADDSrr$", "t2ANDrr$", "t2ASRr[ir]$", "t2BICrr$", "t2CMNrr$",
     "t2CMPr[ir]$", "t2EORrr$", "t2LSLr[ir]$", "t2LSRr[ir]$", "t2MVNr$",
     "t2ORRrr$", "t2REV(16|SH)?$", "t2RORrr$", "t2RSBr[ir]$", "t2RSBSri$",
     "t2SBCrr$", "t2SUBS?rr$", "t2TEQrr$", "t2TSTrr$", "t2STRi12$",
@@ -161,7 +161,7 @@ def : InstRW<[M55WriteDX_DI], (instregex "t2SETPAN$", "tADC$", "tADDhirr$",
     "tADDrSP$", "tADDrSPi$", "tADDrr$", "tADDspi$", "tADDspr$", "tADR$",
     "tAND$", "tASRri$", "tASRrr$", "tBIC$", "tBKPT$", "tCBNZ$", "tCBZ$",
-    "tCMNz$", "tCMPhir$", "tCMPi8$", "tCMPr$", "tCPS$", "tEOR$", "tHINT$",
+    "tCMN","tCMPhir$", "tCMPi8$", "tCMPr$", "tCPS$", "tEOR$", "tHINT$",
     "tHLT$", "tLSLri$", "tLSLrr$", "tLSRri$", "tLSRrr$", "tMOVSr$", "tMUL$",
     "tMVN$", "tORR$", "tPICADD$", "tPOP$", "tPUSH$", "tREV$", "tREV16$",
     "tREVSH$", "tROR$", "tRSB$", "tSBC$", "tSETEND$",
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index 18e41297b1734..c593d3f8a7d27 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -88,9 +88,7 @@ namespace {
   { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
   { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
   { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0,0 },
-  //FIXME: Disable CMN, as CCodes are backwards from compare expectations
-  //{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
-  { ARM::t2CMNzrr, ARM::tCMNz, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
+  { ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
   { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0,0 },
   { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1,0 },
   { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0,0 },
diff --git a/llvm/test/MC/ARM/thumb-shift-encoding.s b/llvm/test/MC/ARM/thumb-shift-encoding.s
index ad35aff450556..6226e0204757c 100644
--- a/llvm/test/MC/ARM/thumb-shift-encoding.s
+++ b/llvm/test/MC/ARM/thumb-shift-encoding.s
@@ -1,7 +1,7 @@
 @ RUN: llvm-mc -mcpu=cortex-a8 -triple thumbv7 -show-encoding < %s | FileCheck %s
 
 @ Uses printT2SOOperand(), used by t2ADCrs t2ADDrs t2ANDrs t2BICrs t2EORrs
-@ t2ORNrs t2ORRrs t2RSBrs t2SBCrs t2SUBrs t2CMNzrs t2CMPrs t2MOVSsi t2MOVsi
+@ t2ORNrs t2ORRrs t2RSBrs t2SBCrs t2SUBrs t2CMNrs t2CMPrs t2MOVSsi t2MOVsi
 @ t2MVNs t2TEQrs t2TSTrs
 
 	sbc.w r12, lr, r0
```
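A minimal standalone sketch (plain C++ written for illustration here, not LLVM code) of why isCMN() above guards the signed and unsigned predicates: the N and Z flags of "cmp a, (0 - b)" and "cmn a, b" always agree, but the V flags differ when b is INT32_MIN (hence the no-signed-wrap / KnownBits check in isSafeSignedCMN) and the C flags differ when b is 0 (hence isKnownNeverZero). Equality only reads Z, so it needs no guard.

```cpp
#include <cstdint>
#include <cstdio>

struct Flags { bool N, Z, C, V; };

// Flags of the 32-bit subtraction a - c, as CMP sets them.
static Flags cmpFlags(uint32_t a, uint32_t c) {
  uint32_t r = a - c;
  return {int32_t(r) < 0, r == 0, a >= c, (((a ^ c) & (a ^ r)) >> 31) != 0};
}

// Flags of the 32-bit addition a + b, as CMN sets them.
static Flags cmnFlags(uint32_t a, uint32_t b) {
  uint32_t r = a + b;
  return {int32_t(r) < 0, r == 0, r < a, ((~(a ^ b) & (a ^ r)) >> 31) != 0};
}

int main() {
  uint32_t a = 1;

  // Signed LT is "N != V". With b == INT32_MIN, 0 - b wraps back to
  // INT32_MIN and the V flags of CMP and CMN disagree.
  uint32_t b = 0x80000000u; // bit pattern of INT32_MIN
  Flags s1 = cmpFlags(a, 0u - b), s2 = cmnFlags(a, b);
  std::printf("signed lt:   cmp=%d cmn=%d\n", s1.N != s1.V, s2.N != s2.V);

  // Unsigned HS (>=) is the C flag. With b == 0 the C flags disagree.
  Flags u1 = cmpFlags(a, 0u), u2 = cmnFlags(a, 0u);
  std::printf("unsigned hs: cmp=%d cmn=%d\n", u1.C, u2.C);
  return 0;
}
```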
@github-actions

github-actions bot commented Oct 13, 2025

⚠️ C/C++ code formatter, clang-format found issues in your code. ⚠️

You can test this locally with the following command:
git-clang-format --diff origin/main HEAD --extensions h,cpp -- llvm/lib/Target/ARM/ARMFeatures.h llvm/lib/Target/ARM/ARMISelLowering.cpp llvm/lib/Target/ARM/ARMLatencyMutations.cpp llvm/lib/Target/ARM/Thumb2SizeReduction.cpp --diff_from_common_commit

⚠️ The reproduction instructions above might return results for more than one PR in a stack if you are using a stacked PR workflow. You can limit the results by changing origin/main to the base branch/commit you want to compare against. ⚠️

View the diff from clang-format here.
```diff
diff --git a/llvm/lib/Target/ARM/ARMLatencyMutations.cpp b/llvm/lib/Target/ARM/ARMLatencyMutations.cpp
index bd497f417..7a0d38d66 100644
--- a/llvm/lib/Target/ARM/ARMLatencyMutations.cpp
+++ b/llvm/lib/Target/ARM/ARMLatencyMutations.cpp
@@ -112,9 +112,8 @@ InstructionInformation::InstructionInformation(const ARMBaseInstrInfo *TII) {
   Info[t2SDIV].IsDivide = Info[t2UDIV].IsDivide = true;
 
   std::initializer_list<unsigned> isInlineShiftALUList = {
-      t2ADCrs, t2ADDSrs, t2ADDrs, t2BICrs, t2EORrs,
-      t2ORNrs, t2RSBSrs, t2RSBrs, t2SBCrs, t2SUBrs,
-      t2SUBSrs, t2CMPrs, t2CMNrs, t2TEQrs, t2TSTrs,
+      t2ADCrs, t2ADDSrs, t2ADDrs, t2BICrs, t2EORrs, t2ORNrs, t2RSBSrs, t2RSBrs,
+      t2SBCrs, t2SUBrs, t2SUBSrs, t2CMPrs, t2CMNrs, t2TEQrs, t2TSTrs,
   };
   for (auto op : isInlineShiftALUList) {
     Info[op].IsInlineShiftALU = true;
diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
index de1a46ef3..2809c6c9c 100644
--- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -78,80 +78,81 @@ namespace {
   };
 
   static const ReduceEntry ReduceTable[] = {
-  // Wide, Narrow1, Narrow2, imm1,imm2, lo1, lo2, P/C,PF,S,AM
-  { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0,0,0 },
-  { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,1,0 },
-  { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0,0,0 },
-  { ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 0,1,0 },
-  { ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
-  { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 1,0,0 },
-  { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
-  { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
-  { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0,0 },
-  { ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
-  { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0,0 },
-  { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1,0 },
-  { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0,0 },
-  // FIXME: adr.n immediate offset must be multiple of 4.
-  //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
-  { ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
-  { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 1,0,1 },
-  { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
-  { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
-  { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,0,0 },
-  { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,1,0 },
-  // FIXME: Do we need the 16-bit 'S' variant?
-  { ARM::t2MOVr,ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0,0 },
-  { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0,0 },
-  { ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
-  { ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0,0, 1,0,0 },
-  { ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
-  { ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
-  { ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
-  { ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0,0, 1,0,0 },
-  { ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2RSBSri,ARM::tRSB, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
-  { ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0,0, 0,0,0 },
-  { ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0,0, 0,0,0 },
-  { ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
-  { ARM::t2SUBSri,ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2,2, 0,0,0 },
-  { ARM::t2SUBSrr,ARM::tSUBrr, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
-  { ARM::t2SXTB, ARM::tSXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
-  { ARM::t2SXTH, ARM::tSXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
-  { ARM::t2TEQrr, ARM::tEOR, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
-  { ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
-  { ARM::t2UXTB, ARM::tUXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
-  { ARM::t2UXTH, ARM::tUXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
-
-  // FIXME: Clean this up after splitting each Thumb load / store opcode
-  // into multiple ones.
-  { ARM::t2LDRi12,ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRBi12,ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRHi12,ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2LDR_POST,ARM::tLDMIA_UPD,0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STRHi12,ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
-  { ARM::t2STR_POST,ARM::tSTMIA_UPD,0, 0, 0, 1, 0, 0,0, 0,1,0 },
-
-  { ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
-  { ARM::t2LDMIA_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 0,1,0 },
-  { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0, 0, 1, 1, 1,1, 0,1,0 },
-  // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
-  // tSTMIA_UPD is a change in semantics which can only be used if the base
-  // register is killed. This difference is correctly handled elsewhere.
-  { ARM::t2STMIA, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
-  { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
-  { ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1,1, 0,1,0 }
-  };
+      // Wide, Narrow1, Narrow2, imm1,imm2, lo1, lo2,
+      // P/C,PF,S,AM
+      {ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0, 0, 0, 0, 0},
+      {ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0, 0, 0, 1, 0},
+      {ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0, 1, 0, 0, 0},
+      {ARM::t2ADDSri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2, 2, 0, 1, 0},
+      {ARM::t2ADDSrr, ARM::tADDrr, 0, 0, 0, 1, 0, 2, 0, 0, 1, 0},
+      {ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0, 0, 1, 0, 0},
+      {ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0, 0, 1, 0, 1},
+      {ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0, 0, 1, 0, 1},
+      {ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0, 0, 1, 0, 0},
+      {ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0},
+      {ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2, 0, 0, 0, 0},
+      {ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2, 0, 0, 1, 0},
+      {ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0, 0, 1, 0, 0},
+      // FIXME: adr.n immediate offset must be multiple of 4.
+      //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0,0,0
+      //},
+      {ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0, 0, 1, 0, 1},
+      {ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0, 0, 1, 0, 1},
+      {ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0, 0, 1, 0, 1},
+      {ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0, 0, 1, 0, 1},
+      {ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0, 0, 1, 0, 0},
+      {ARM::t2MOVi16, ARM::tMOVi8, 0, 8, 0, 1, 0, 0, 0, 1, 1, 0},
+      // FIXME: Do we need the 16-bit 'S' variant?
+      {ARM::t2MOVr, ARM::tMOVr, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0},
+      {ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0, 0, 1, 0, 0},
+      {ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
+      {ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0, 0, 1, 0, 0},
+      {ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0},
+      {ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0},
+      {ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0},
+      {ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0, 0, 1, 0, 0},
+      {ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2RSBSri, ARM::tRSB, 0, 0, 0, 1, 0, 2, 0, 0, 1, 0},
+      {ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0, 0, 0, 0, 0},
+      {ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0, 0, 0, 0, 0},
+      {ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
+      {ARM::t2SUBSri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2, 2, 0, 0, 0},
+      {ARM::t2SUBSrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0},
+      {ARM::t2SXTB, ARM::tSXTB, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0},
+      {ARM::t2SXTH, ARM::tSXTH, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0},
+      {ARM::t2TEQrr, ARM::tEOR, 0, 0, 0, 1, 0, 2, 0, 0, 1, 0},
+      {ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2, 0, 0, 0, 0},
+      {ARM::t2UXTB, ARM::tUXTB, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0},
+      {ARM::t2UXTH, ARM::tUXTH, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0},
+
+      // FIXME: Clean this up after splitting each Thumb load / store opcode
+      // into multiple ones.
+      {ARM::t2LDRi12, ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRBi12, ARM::tLDRBi, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRHi12, ARM::tLDRHi, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRSBs, ARM::tLDRSB, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDRSHs, ARM::tLDRSH, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2LDR_POST, ARM::tLDMIA_UPD, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STRi12, ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STRBi12, ARM::tSTRBi, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STRHi12, ARM::tSTRHi, 0, 5, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+      {ARM::t2STR_POST, ARM::tSTMIA_UPD, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0},
+
+      {ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0},
+      {ARM::t2LDMIA_RET, 0, ARM::tPOP_RET, 0, 0, 1, 1, 1, 1, 0, 1, 0},
+      {ARM::t2LDMIA_UPD, ARM::tLDMIA_UPD, ARM::tPOP, 0, 0, 1, 1, 1, 1, 0, 1, 0},
+      // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
+      // tSTMIA_UPD is a change in semantics which can only be used if the base
+      // register is killed. This difference is correctly handled elsewhere.
+      {ARM::t2STMIA, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0},
+      {ARM::t2STMIA_UPD, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0},
+      {ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1, 1, 0, 1, 0}};
 
 class Thumb2SizeReduce : public MachineFunctionPass {
 public:
```
@AZero13 force-pushed the enable-cmnz branch 6 times, most recently from bbbc7dd to 33d92b7, on October 13, 2025 at 19:06
@AZero13 changed the title from "[ARM] Enable ARMISD::CMN" to "[ARM] Enable creation of ARMISD::CMN nodes" on Oct 13, 2025
@AZero13 force-pushed the enable-cmnz branch 7 times, most recently from 5c3bf60 to 946266b, on October 15, 2025 at 02:12
@AZero13
Contributor Author

AZero13 commented Nov 13, 2025

@RKSimon requested a review from davemgreen on November 13, 2025 at 20:24
Map ARMISD::CMN to tCMN instead of armcmpz. Rename the cmn instructions to match this new reality. Also use the new isCMN to simplify LowerCMP.
@github-actions

🐧 Linux x64 Test Results

  • 186341 tests passed
  • 4858 tests skipped
@AZero13
Contributor Author

AZero13 commented Dec 2, 2025

@davemgreen Ping?
