aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrancis Visoiu Mistrih <francisvm@yahoo.com>2017-12-07 10:40:31 +0000
committerFrancis Visoiu Mistrih <francisvm@yahoo.com>2017-12-07 10:40:31 +0000
commitfd11bc081304b8ca3bf7a657eb45af7a6a24246f (patch)
tree8d35433975ca69f6b7e331e01569816438d561f1
parente65af32d44f2d727de5ad3dda03a60fffe3ecdb7 (diff)
[CodeGen] Use MachineOperand::print in the MIRPrinter for MO_Register.
Work towards the unification of MIR and debug output by refactoring the interfaces. For MachineOperand::print, keep a simple version that can be easily called from `dump()`, and a more complex one which will be called from both the MIRPrinter and MachineInstr::print. Add extra checks inside MachineOperand for detached operands (operands with getParent() == nullptr). https://reviews.llvm.org/D40836 * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/kill: ([^ ]+) ([^ ]+)<def> ([^ ]+)/kill: \1 def \2 \3/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/kill: ([^ ]+) ([^ ]+) ([^ ]+)<def>/kill: \1 \2 def \3/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/kill: def ([^ ]+) ([^ ]+) ([^ ]+)<def>/kill: def \1 \2 def \3/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/<def>//g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<kill>/killed \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-use,kill>/implicit killed \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<dead>/dead \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<def[ ]*,[ ]*dead>/dead \1/g' * find . 
\( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-def[ ]*,[ ]*dead>/implicit-def dead \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-def>/implicit-def \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<imp-use>/implicit \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<internal>/internal \1/g' * find . \( -name "*.mir" -o -name "*.cpp" -o -name "*.h" -o -name "*.ll" -o -name "*.s" \) -type f -print0 | xargs -0 sed -i '' -E 's/([^ ]+)<undef>/undef \1/g' git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320022 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h56
-rw-r--r--include/llvm/CodeGen/LivePhysRegs.h8
-rw-r--r--include/llvm/CodeGen/MachineBasicBlock.h4
-rw-r--r--include/llvm/CodeGen/MachineInstr.h10
-rw-r--r--include/llvm/CodeGen/MachineInstrBundle.h2
-rw-r--r--include/llvm/CodeGen/MachineOperand.h33
-rw-r--r--include/llvm/CodeGen/TargetInstrInfo.h4
-rw-r--r--include/llvm/CodeGen/TargetRegisterInfo.h5
-rw-r--r--lib/CodeGen/AggressiveAntiDepBreaker.cpp8
-rw-r--r--lib/CodeGen/AsmPrinter/AsmPrinter.cpp6
-rw-r--r--lib/CodeGen/BranchFolding.cpp2
-rw-r--r--lib/CodeGen/CriticalAntiDepBreaker.cpp8
-rw-r--r--lib/CodeGen/ExpandPostRAPseudos.cpp2
-rw-r--r--lib/CodeGen/ImplicitNullChecks.cpp2
-rw-r--r--lib/CodeGen/InlineSpiller.cpp2
-rw-r--r--lib/CodeGen/LiveIntervalAnalysis.cpp4
-rw-r--r--lib/CodeGen/LiveVariables.cpp12
-rw-r--r--lib/CodeGen/MIRPrinter.cpp105
-rw-r--r--lib/CodeGen/MachineCSE.cpp8
-rw-r--r--lib/CodeGen/MachineCopyPropagation.cpp18
-rw-r--r--lib/CodeGen/MachineInstr.cpp89
-rw-r--r--lib/CodeGen/MachineOperand.cpp158
-rw-r--r--lib/CodeGen/MachineSink.cpp6
-rw-r--r--lib/CodeGen/MachineVerifier.cpp2
-rw-r--r--lib/CodeGen/RegAllocFast.cpp4
-rw-r--r--lib/CodeGen/RegisterCoalescer.cpp30
-rw-r--r--lib/CodeGen/RegisterScavenging.cpp2
-rw-r--r--lib/CodeGen/SplitKit.cpp2
-rw-r--r--lib/CodeGen/TargetRegisterInfo.cpp15
-rw-r--r--lib/CodeGen/TwoAddressInstructionPass.cpp48
-rw-r--r--lib/CodeGen/VirtRegMap.cpp10
-rw-r--r--lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp6
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.cpp6
-rw-r--r--lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp4
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp17
-rw-r--r--lib/Target/AMDGPU/CaymanInstructions.td4
-rw-r--r--lib/Target/AMDGPU/EvergreenInstructions.td4
-rw-r--r--lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp8
-rw-r--r--lib/Target/AMDGPU/SIFixWWMLiveness.cpp4
-rw-r--r--lib/Target/AMDGPU/SIFoldOperands.cpp4
-rw-r--r--lib/Target/AMDGPU/SIPeepholeSDWA.cpp2
-rw-r--r--lib/Target/ARM/ARMBaseInstrInfo.cpp10
-rw-r--r--lib/Target/ARM/ARMExpandPseudoInsts.cpp2
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp2
-rw-r--r--lib/Target/ARM/ARMLoadStoreOptimizer.cpp2
-rw-r--r--lib/Target/BPF/BPFISelDAGToDAG.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonBlockRanges.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonConstPropagation.cpp6
-rw-r--r--lib/Target/Hexagon/HexagonCopyToCombine.cpp8
-rw-r--r--lib/Target/Hexagon/HexagonEarlyIfConv.cpp36
-rw-r--r--lib/Target/Hexagon/HexagonExpandCondsets.cpp12
-rw-r--r--lib/Target/Hexagon/HexagonHardwareLoops.cpp2
-rw-r--r--lib/Target/Hexagon/HexagonInstrInfo.cpp8
-rw-r--r--lib/Target/Hexagon/HexagonNewValueJump.cpp12
-rw-r--r--lib/Target/Hexagon/HexagonPeephole.cpp28
-rw-r--r--lib/Target/Hexagon/HexagonVLIWPacketizer.cpp24
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp4
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp14
-rw-r--r--lib/Target/Hexagon/RDFGraph.h2
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp2
-rw-r--r--lib/Target/NVPTX/NVPTXPeephole.cpp6
-rw-r--r--lib/Target/PowerPC/PPCBranchCoalescing.cpp24
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp6
-rw-r--r--lib/Target/PowerPC/PPCMIPeephole.cpp6
-rw-r--r--lib/Target/PowerPC/PPCQPXLoadSplat.cpp4
-rw-r--r--lib/Target/PowerPC/PPCVSXFMAMutate.cpp18
-rw-r--r--lib/Target/SystemZ/SystemZElimCompare.cpp4
-rw-r--r--lib/Target/X86/README-X86-64.txt8
-rw-r--r--lib/Target/X86/X86FixupBWInsts.cpp6
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp10
-rw-r--r--lib/Target/X86/X86VZeroUpper.cpp2
-rw-r--r--test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll24
-rw-r--r--test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir4
-rw-r--r--test/CodeGen/AArch64/GlobalISel/verify-selected.mir6
-rw-r--r--test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-csldst-mmo.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-dead-register-def-bug.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-fast-isel-rem.ll6
-rw-r--r--test/CodeGen/AArch64/arm64-ldp-cluster.ll52
-rw-r--r--test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll2
-rw-r--r--test/CodeGen/AArch64/arm64-misched-memdep-bug.ll4
-rw-r--r--test/CodeGen/AArch64/arm64-misched-multimmo.ll2
-rw-r--r--test/CodeGen/AArch64/loh.mir78
-rw-r--r--test/CodeGen/AArch64/machine-copy-prop.ll12
-rw-r--r--test/CodeGen/AArch64/scheduledag-constreg.mir8
-rw-r--r--test/CodeGen/AArch64/tailcall_misched_graph.ll6
-rw-r--r--test/CodeGen/AMDGPU/llvm.dbg.value.ll2
-rw-r--r--test/CodeGen/AMDGPU/schedule-regpressure.mir2
-rw-r--r--test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll4
-rw-r--r--test/CodeGen/ARM/2011-11-14-EarlyClobber.ll2
-rw-r--r--test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll4
-rw-r--r--test/CodeGen/ARM/Windows/vla-cpsr.ll2
-rw-r--r--test/CodeGen/ARM/crash-greedy.ll2
-rw-r--r--test/CodeGen/ARM/ifcvt-dead-def.ll2
-rw-r--r--test/CodeGen/ARM/misched-copy-arm.ll6
-rw-r--r--test/CodeGen/ARM/misched-int-basic-thumb2.mir22
-rw-r--r--test/CodeGen/ARM/misched-int-basic.mir14
-rw-r--r--test/CodeGen/ARM/sched-it-debug-nodes.mir6
-rw-r--r--test/CodeGen/ARM/single-issue-r52.mir14
-rw-r--r--test/CodeGen/ARM/subreg-remat.ll6
-rw-r--r--test/CodeGen/ARM/vldm-liveness.mir8
-rw-r--r--test/CodeGen/AVR/select-must-add-unconditional-jump.ll10
-rw-r--r--test/CodeGen/Hexagon/branch-folder-hoist-kills.mir14
-rw-r--r--test/CodeGen/Hexagon/post-inc-aa-metadata.ll2
-rw-r--r--test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll2
-rw-r--r--test/CodeGen/PowerPC/byval-agg-info.ll2
-rw-r--r--test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll16
-rw-r--r--test/CodeGen/PowerPC/quadint-return.ll4
-rw-r--r--test/CodeGen/SystemZ/pr32505.ll4
-rw-r--r--test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir2
-rw-r--r--test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll4
-rw-r--r--test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll4
-rw-r--r--test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll2
-rw-r--r--test/CodeGen/X86/2010-04-08-CoalescerBug.ll4
-rw-r--r--test/CodeGen/X86/2010-05-12-FastAllocKills.ll24
-rw-r--r--test/CodeGen/X86/GlobalISel/add-scalar.ll10
-rw-r--r--test/CodeGen/X86/GlobalISel/ext-x86-64.ll2
-rw-r--r--test/CodeGen/X86/GlobalISel/ext.ll4
-rw-r--r--test/CodeGen/X86/GlobalISel/gep.ll4
-rw-r--r--test/CodeGen/X86/GlobalISel/x86_64-fallback.ll2
-rw-r--r--test/CodeGen/X86/add-sub-nsw-nuw.ll2
-rw-r--r--test/CodeGen/X86/add.ll4
-rw-r--r--test/CodeGen/X86/addcarry.ll2
-rw-r--r--test/CodeGen/X86/anyext.ll8
-rw-r--r--test/CodeGen/X86/atomic-eflags-reuse.ll2
-rw-r--r--test/CodeGen/X86/avx-cast.ll14
-rw-r--r--test/CodeGen/X86/avx-cmp.ll2
-rw-r--r--test/CodeGen/X86/avx-intrinsics-fast-isel.ll56
-rw-r--r--test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll4
-rw-r--r--test/CodeGen/X86/avx-vinsertf128.ll6
-rw-r--r--test/CodeGen/X86/avx-vzeroupper.ll12
-rw-r--r--test/CodeGen/X86/avx2-conversions.ll8
-rw-r--r--test/CodeGen/X86/avx2-intrinsics-fast-isel.ll4
-rw-r--r--test/CodeGen/X86/avx2-shift.ll8
-rw-r--r--test/CodeGen/X86/avx2-vector-shifts.ll8
-rw-r--r--test/CodeGen/X86/avx512-arith.ll24
-rw-r--r--test/CodeGen/X86/avx512-build-vector.ll2
-rw-r--r--test/CodeGen/X86/avx512-calling-conv.ll16
-rw-r--r--test/CodeGen/X86/avx512-cmp-kor-sequence.ll2
-rw-r--r--test/CodeGen/X86/avx512-cvt.ll66
-rw-r--r--test/CodeGen/X86/avx512-ext.ll28
-rw-r--r--test/CodeGen/X86/avx512-extract-subvector.ll4
-rw-r--r--test/CodeGen/X86/avx512-hadd-hsub.ll20
-rw-r--r--test/CodeGen/X86/avx512-insert-extract.ll104
-rw-r--r--test/CodeGen/X86/avx512-insert-extract_i1.ll2
-rw-r--r--test/CodeGen/X86/avx512-intrinsics-upgrade.ll38
-rw-r--r--test/CodeGen/X86/avx512-intrinsics.ll24
-rw-r--r--test/CodeGen/X86/avx512-mask-op.ll132
-rw-r--r--test/CodeGen/X86/avx512-memfold.ll2
-rw-r--r--test/CodeGen/X86/avx512-regcall-Mask.ll78
-rw-r--r--test/CodeGen/X86/avx512-regcall-NoMask.ll30
-rwxr-xr-xtest/CodeGen/X86/avx512-schedule.ll64
-rw-r--r--test/CodeGen/X86/avx512-select.ll12
-rw-r--r--test/CodeGen/X86/avx512-shift.ll10
-rw-r--r--test/CodeGen/X86/avx512-shuffles/partial_permute.ll14
-rw-r--r--test/CodeGen/X86/avx512-trunc.ll30
-rw-r--r--test/CodeGen/X86/avx512-vbroadcast.ll8
-rw-r--r--test/CodeGen/X86/avx512-vec-cmp.ll30
-rw-r--r--test/CodeGen/X86/avx512-vec3-crash.ll6
-rw-r--r--test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll8
-rw-r--r--test/CodeGen/X86/avx512bw-mov.ll16
-rw-r--r--test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll36
-rw-r--r--test/CodeGen/X86/avx512bwvl-intrinsics.ll6
-rw-r--r--test/CodeGen/X86/avx512bwvl-vec-test-testn.ll24
-rw-r--r--test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll12
-rw-r--r--test/CodeGen/X86/avx512dq-intrinsics.ll16
-rw-r--r--test/CodeGen/X86/avx512dq-mask-op.ll4
-rw-r--r--test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll8
-rw-r--r--test/CodeGen/X86/avx512dqvl-intrinsics.ll16
-rw-r--r--test/CodeGen/X86/avx512f-vec-test-testn.ll16
-rw-r--r--test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll52
-rw-r--r--test/CodeGen/X86/avx512vl-intrinsics.ll8
-rw-r--r--test/CodeGen/X86/avx512vl-vec-cmp.ll112
-rw-r--r--test/CodeGen/X86/avx512vl-vec-masked-cmp.ll1272
-rw-r--r--test/CodeGen/X86/avx512vl-vec-test-testn.ll64
-rw-r--r--test/CodeGen/X86/bitcast-and-setcc-128.ll66
-rw-r--r--test/CodeGen/X86/bitcast-and-setcc-256.ll38
-rw-r--r--test/CodeGen/X86/bitcast-and-setcc-512.ll36
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll24
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll34
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector-bool.ll10
-rw-r--r--test/CodeGen/X86/bitcast-int-to-vector.ll2
-rw-r--r--test/CodeGen/X86/bitcast-setcc-128.ll66
-rw-r--r--test/CodeGen/X86/bitcast-setcc-256.ll38
-rw-r--r--test/CodeGen/X86/bitcast-setcc-512.ll40
-rw-r--r--test/CodeGen/X86/bitreverse.ll14
-rw-r--r--test/CodeGen/X86/bmi-schedule.ll24
-rw-r--r--test/CodeGen/X86/bmi.ll22
-rw-r--r--test/CodeGen/X86/bool-simplify.ll4
-rw-r--r--test/CodeGen/X86/bool-vector.ll8
-rw-r--r--test/CodeGen/X86/broadcastm-lowering.ll8
-rw-r--r--test/CodeGen/X86/bypass-slow-division-32.ll14
-rw-r--r--test/CodeGen/X86/bypass-slow-division-64.ll8
-rw-r--r--test/CodeGen/X86/clz.ll68
-rw-r--r--test/CodeGen/X86/cmov-into-branch.ll2
-rw-r--r--test/CodeGen/X86/cmov-promotion.ll8
-rw-r--r--test/CodeGen/X86/cmov.ll2
-rw-r--r--test/CodeGen/X86/cmovcmov.ll4
-rw-r--r--test/CodeGen/X86/coalescer-dce.ll8
-rw-r--r--test/CodeGen/X86/combine-abs.ll4
-rw-r--r--test/CodeGen/X86/compress_expand.ll22
-rw-r--r--test/CodeGen/X86/critical-edge-split-2.ll2
-rw-r--r--test/CodeGen/X86/ctpop-combine.ll2
-rw-r--r--test/CodeGen/X86/dagcombine-cse.ll4
-rw-r--r--test/CodeGen/X86/divide-by-constant.ll34
-rw-r--r--test/CodeGen/X86/divrem.ll4
-rw-r--r--test/CodeGen/X86/divrem8_ext.ll36
-rw-r--r--test/CodeGen/X86/extractelement-index.ll50
-rw-r--r--test/CodeGen/X86/f16c-intrinsics-fast-isel.ll4
-rw-r--r--test/CodeGen/X86/fast-isel-cmp.ll8
-rw-r--r--test/CodeGen/X86/fast-isel-sext-zext.ll16
-rw-r--r--test/CodeGen/X86/fast-isel-shift.ll24
-rw-r--r--test/CodeGen/X86/fixup-bw-copy.ll2
-rw-r--r--test/CodeGen/X86/fixup-bw-inst.mir4
-rw-r--r--test/CodeGen/X86/gpr-to-mask.ll40
-rw-r--r--test/CodeGen/X86/half.ll2
-rw-r--r--test/CodeGen/X86/handle-move.ll6
-rw-r--r--test/CodeGen/X86/horizontal-reduce-smax.ll96
-rw-r--r--test/CodeGen/X86/horizontal-reduce-smin.ll96
-rw-r--r--test/CodeGen/X86/horizontal-reduce-umax.ll96
-rw-r--r--test/CodeGen/X86/horizontal-reduce-umin.ll96
-rw-r--r--test/CodeGen/X86/iabs.ll2
-rw-r--r--test/CodeGen/X86/illegal-bitfield-loadstore.ll6
-rw-r--r--test/CodeGen/X86/imul.ll4
-rw-r--r--test/CodeGen/X86/inline-asm-fpstack.ll6
-rw-r--r--test/CodeGen/X86/lea-3.ll8
-rw-r--r--test/CodeGen/X86/lea-opt-cse3.ll16
-rw-r--r--test/CodeGen/X86/lea32-schedule.ll306
-rw-r--r--test/CodeGen/X86/liveness-local-regalloc.ll4
-rw-r--r--test/CodeGen/X86/loop-search.ll6
-rw-r--r--test/CodeGen/X86/lzcnt-schedule.ll12
-rw-r--r--test/CodeGen/X86/lzcnt-zext-cmp.ll6
-rw-r--r--test/CodeGen/X86/machine-cp.ll2
-rw-r--r--test/CodeGen/X86/machine-cse.ll6
-rw-r--r--test/CodeGen/X86/masked_gather_scatter.ll88
-rw-r--r--test/CodeGen/X86/masked_memop.ll26
-rw-r--r--test/CodeGen/X86/misched-copy.ll4
-rw-r--r--test/CodeGen/X86/movmsk.ll2
-rw-r--r--test/CodeGen/X86/mul-constant-i16.ll180
-rw-r--r--test/CodeGen/X86/mul-constant-i32.ll190
-rw-r--r--test/CodeGen/X86/mul-constant-result.ll48
-rw-r--r--test/CodeGen/X86/negate-i1.ll6
-rw-r--r--test/CodeGen/X86/norex-subreg.ll8
-rw-r--r--test/CodeGen/X86/oddshuffles.ll14
-rw-r--r--test/CodeGen/X86/or-lea.ll26
-rw-r--r--test/CodeGen/X86/phys_subreg_coalesce-3.ll6
-rw-r--r--test/CodeGen/X86/pmul.ll4
-rw-r--r--test/CodeGen/X86/popcnt-schedule.ll16
-rw-r--r--test/CodeGen/X86/popcnt.ll8
-rw-r--r--test/CodeGen/X86/pr22970.ll2
-rw-r--r--test/CodeGen/X86/pr28173.ll4
-rw-r--r--test/CodeGen/X86/pr28560.ll2
-rw-r--r--test/CodeGen/X86/pr29061.ll4
-rw-r--r--test/CodeGen/X86/pr32282.ll2
-rw-r--r--test/CodeGen/X86/pr32329.ll2
-rw-r--r--test/CodeGen/X86/pr32345.ll6
-rw-r--r--test/CodeGen/X86/pr34653.ll8
-rw-r--r--test/CodeGen/X86/promote-vec3.ll48
-rw-r--r--test/CodeGen/X86/psubus.ll2
-rw-r--r--test/CodeGen/X86/reduce-trunc-shl.ll2
-rw-r--r--test/CodeGen/X86/remat-phys-dead.ll4
-rw-r--r--test/CodeGen/X86/sar_fold64.ll8
-rw-r--r--test/CodeGen/X86/schedule-x86_64.ll40
-rw-r--r--test/CodeGen/X86/select.ll12
-rw-r--r--test/CodeGen/X86/select_const.ll12
-rw-r--r--test/CodeGen/X86/setcc-lowering.ll4
-rw-r--r--test/CodeGen/X86/sext-i1.ll4
-rw-r--r--test/CodeGen/X86/shift-combine.ll8
-rw-r--r--test/CodeGen/X86/shift-double.ll4
-rw-r--r--test/CodeGen/X86/shrink-compare.ll4
-rw-r--r--test/CodeGen/X86/shuffle-vs-trunc-256.ll10
-rw-r--r--test/CodeGen/X86/sse2-schedule.ll20
-rw-r--r--test/CodeGen/X86/sse42-schedule.ll36
-rw-r--r--test/CodeGen/X86/subvector-broadcast.ll112
-rw-r--r--test/CodeGen/X86/tbm-intrinsics-fast-isel.ll8
-rw-r--r--test/CodeGen/X86/tbm_patterns.ll10
-rw-r--r--test/CodeGen/X86/umul-with-overflow.ll6
-rw-r--r--test/CodeGen/X86/urem-i8-constant.ll2
-rw-r--r--test/CodeGen/X86/urem-power-of-two.ll12
-rw-r--r--test/CodeGen/X86/vec_cmp_uint-128.ll8
-rw-r--r--test/CodeGen/X86/vec_fp_to_int.ll94
-rw-r--r--test/CodeGen/X86/vec_ins_extract-1.ll8
-rw-r--r--test/CodeGen/X86/vec_insert-4.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-5.ll2
-rw-r--r--test/CodeGen/X86/vec_insert-8.ll4
-rw-r--r--test/CodeGen/X86/vec_insert-mmx.ll2
-rw-r--r--test/CodeGen/X86/vec_int_to_fp.ll186
-rw-r--r--test/CodeGen/X86/vec_minmax_sint.ll48
-rw-r--r--test/CodeGen/X86/vec_minmax_uint.ll48
-rw-r--r--test/CodeGen/X86/vec_ss_load_fold.ll20
-rw-r--r--test/CodeGen/X86/vector-bitreverse.ll16
-rw-r--r--test/CodeGen/X86/vector-compare-all_of.ll36
-rw-r--r--test/CodeGen/X86/vector-compare-any_of.ll36
-rw-r--r--test/CodeGen/X86/vector-compare-results.ll52
-rw-r--r--test/CodeGen/X86/vector-extend-inreg.ll4
-rw-r--r--test/CodeGen/X86/vector-half-conversions.ll122
-rw-r--r--test/CodeGen/X86/vector-lzcnt-128.ll16
-rw-r--r--test/CodeGen/X86/vector-lzcnt-256.ll16
-rw-r--r--test/CodeGen/X86/vector-popcnt-128.ll16
-rw-r--r--test/CodeGen/X86/vector-popcnt-256.ll16
-rw-r--r--test/CodeGen/X86/vector-rotate-128.ll38
-rw-r--r--test/CodeGen/X86/vector-rotate-256.ll38
-rw-r--r--test/CodeGen/X86/vector-sext.ll28
-rw-r--r--test/CodeGen/X86/vector-shift-ashr-128.ll32
-rw-r--r--test/CodeGen/X86/vector-shift-ashr-256.ll28
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-128.ll18
-rw-r--r--test/CodeGen/X86/vector-shift-lshr-256.ll10
-rw-r--r--test/CodeGen/X86/vector-shift-shl-128.ll14
-rw-r--r--test/CodeGen/X86/vector-shift-shl-256.ll10
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v4.ll6
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v16.ll4
-rw-r--r--test/CodeGen/X86/vector-shuffle-512-v8.ll8
-rw-r--r--test/CodeGen/X86/vector-shuffle-avx512.ll84
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining-avx2.ll24
-rw-r--r--test/CodeGen/X86/vector-shuffle-v1.ll46
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-128.ll228
-rw-r--r--test/CodeGen/X86/vector-shuffle-variable-256.ll72
-rw-r--r--test/CodeGen/X86/vector-trunc-math.ll126
-rw-r--r--test/CodeGen/X86/vector-trunc.ll42
-rw-r--r--test/CodeGen/X86/vector-tzcnt-128.ll16
-rw-r--r--test/CodeGen/X86/vector-tzcnt-256.ll16
-rw-r--r--test/CodeGen/X86/verifier-phi-fail0.mir4
-rw-r--r--test/CodeGen/X86/vpshufbitqbm-intrinsics.ll2
-rw-r--r--test/CodeGen/X86/vselect-pcmp.ll8
-rw-r--r--test/CodeGen/X86/widen_bitops-0.ll36
-rw-r--r--test/CodeGen/X86/x86-interleaved-access.ll4
-rw-r--r--test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll2
-rw-r--r--test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir8
-rw-r--r--test/MC/AArch64/arm64-leaf-compact-unwind.s2
-rw-r--r--unittests/CodeGen/MachineOperandTest.cpp39
331 files changed, 4205 insertions, 4120 deletions
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index bed7c724892..aa875c11d86 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -204,7 +204,7 @@ public:
const MDNode *Variable,
const MDNode *Expr);
- /// Build and insert \p Res<def> = G_FRAME_INDEX \p Idx
+ /// Build and insert \p Res = G_FRAME_INDEX \p Idx
///
/// G_FRAME_INDEX materializes the address of an alloca value or other
/// stack-based object.
@@ -215,7 +215,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildFrameIndex(unsigned Res, int Idx);
- /// Build and insert \p Res<def> = G_GLOBAL_VALUE \p GV
+ /// Build and insert \p Res = G_GLOBAL_VALUE \p GV
///
/// G_GLOBAL_VALUE materializes the address of the specified global
/// into \p Res.
@@ -227,7 +227,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildGlobalValue(unsigned Res, const GlobalValue *GV);
- /// Build and insert \p Res<def> = G_ADD \p Op0, \p Op1
+ /// Build and insert \p Res = G_ADD \p Op0, \p Op1
///
/// G_ADD sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
/// truncated to their width.
@@ -245,7 +245,7 @@ public:
return buildAdd(Res, (getRegFromArg(UseArgs))...);
}
- /// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
+ /// Build and insert \p Res = G_SUB \p Op0, \p Op1
///
/// G_SUB sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
/// truncated to their width.
@@ -258,7 +258,7 @@ public:
MachineInstrBuilder buildSub(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_MUL \p Op0, \p Op1
+ /// Build and insert \p Res = G_MUL \p Op0, \p Op1
///
/// G_MUL sets \p Res to the sum of integer parameters \p Op0 and \p Op1,
/// truncated to their width.
@@ -271,7 +271,7 @@ public:
MachineInstrBuilder buildMul(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_GEP \p Op0, \p Op1
+ /// Build and insert \p Res = G_GEP \p Op0, \p Op1
///
/// G_GEP adds \p Op1 bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res.
@@ -285,7 +285,7 @@ public:
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Materialize and insert \p Res<def> = G_GEP \p Op0, (G_CONSTANT \p Value)
+ /// Materialize and insert \p Res = G_GEP \p Op0, (G_CONSTANT \p Value)
///
/// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res. If \p Value is zero then no
@@ -305,7 +305,7 @@ public:
const LLT &ValueTy,
uint64_t Value);
- /// Build and insert \p Res<def> = G_PTR_MASK \p Op0, \p NumBits
+ /// Build and insert \p Res = G_PTR_MASK \p Op0, \p NumBits
///
/// G_PTR_MASK clears the low bits of a pointer operand without destroying its
/// pointer properties. This has the effect of rounding the address *down* to
@@ -321,7 +321,7 @@ public:
MachineInstrBuilder buildPtrMask(unsigned Res, unsigned Op0,
uint32_t NumBits);
- /// Build and insert \p Res<def>, \p CarryOut<def> = G_UADDE \p Op0,
+ /// Build and insert \p Res, \p CarryOut = G_UADDE \p Op0,
/// \p Op1, \p CarryIn
///
/// G_UADDE sets \p Res to \p Op0 + \p Op1 + \p CarryIn (truncated to the bit
@@ -338,7 +338,7 @@ public:
MachineInstrBuilder buildUAdde(unsigned Res, unsigned CarryOut, unsigned Op0,
unsigned Op1, unsigned CarryIn);
- /// Build and insert \p Res<def> = G_AND \p Op0, \p Op1
+ /// Build and insert \p Res = G_AND \p Op0, \p Op1
///
/// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
/// Op1.
@@ -355,7 +355,7 @@ public:
MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
unsigned Op1);
- /// Build and insert \p Res<def> = G_OR \p Op0, \p Op1
+ /// Build and insert \p Res = G_OR \p Op0, \p Op1
///
/// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
/// Op1.
@@ -367,7 +367,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
- /// Build and insert \p Res<def> = G_ANYEXT \p Op0
+ /// Build and insert \p Res = G_ANYEXT \p Op0
///
/// G_ANYEXT produces a register of the specified width, with bits 0 to
/// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are unspecified
@@ -387,7 +387,7 @@ public:
return buildAnyExt(getDestFromArg(Res), getRegFromArg(Arg));
}
- /// Build and insert \p Res<def> = G_SEXT \p Op
+ /// Build and insert \p Res = G_SEXT \p Op
///
/// G_SEXT produces a register of the specified width, with bits 0 to
/// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are duplicated from the
@@ -401,7 +401,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildSExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_ZEXT \p Op
+ /// Build and insert \p Res = G_ZEXT \p Op
///
/// G_ZEXT produces a register of the specified width, with bits 0 to
/// sizeof(\p Ty) * 8 set to \p Op. The remaining bits are 0. For a vector
@@ -415,7 +415,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildZExt(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// Build and insert \p Res = G_SEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
/// ///
/// \pre setBasicBlock or setMI must have been called.
@@ -425,7 +425,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildSExtOrTrunc(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
+ /// Build and insert \p Res = G_ZEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
/// ///
/// \pre setBasicBlock or setMI must have been called.
@@ -435,7 +435,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildZExtOrTrunc(unsigned Res, unsigned Op);
- // Build and insert \p Res<def> = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
+ // Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_TRUNC \p Op, or
/// \p Res = COPY \p Op depending on the differing sizes of \p Res and \p Op.
/// ///
/// \pre setBasicBlock or setMI must have been called.
@@ -449,7 +449,7 @@ public:
}
MachineInstrBuilder buildAnyExtOrTrunc(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = \p ExtOpc, \p Res = G_TRUNC \p
+ /// Build and insert \p Res = \p ExtOpc, \p Res = G_TRUNC \p
/// Op, or \p Res = COPY \p Op depending on the differing sizes of \p Res and
/// \p Op.
/// ///
@@ -534,7 +534,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildFConstant(unsigned Res, const ConstantFP &Val);
- /// Build and insert \p Res<def> = COPY Op
+ /// Build and insert \p Res = COPY Op
///
/// Register-to-register COPY sets \p Res to \p Op.
///
@@ -547,7 +547,7 @@ public:
return buildCopy(getDestFromArg(Res), getRegFromArg(Src));
}
- /// Build and insert `Res<def> = G_LOAD Addr, MMO`.
+ /// Build and insert `Res = G_LOAD Addr, MMO`.
///
/// Loads the value stored at \p Addr. Puts the result in \p Res.
///
@@ -571,7 +571,7 @@ public:
MachineInstrBuilder buildStore(unsigned Val, unsigned Addr,
MachineMemOperand &MMO);
- /// Build and insert `Res0<def>, ... = G_EXTRACT Src, Idx0`.
+ /// Build and insert `Res0, ... = G_EXTRACT Src, Idx0`.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res and \p Src must be generic virtual registers.
@@ -598,7 +598,7 @@ public:
void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
ArrayRef<uint64_t> Indices);
- /// Build and insert \p Res<def> = G_MERGE_VALUES \p Op0, ...
+ /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
///
/// G_MERGE_VALUES combines the input elements contiguously into a larger
/// register.
@@ -611,7 +611,7 @@ public:
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildMerge(unsigned Res, ArrayRef<unsigned> Ops);
- /// Build and insert \p Res0<def>, ... = G_UNMERGE_VALUES \p Op
+ /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
///
/// G_UNMERGE_VALUES splits contiguous bits of the input into multiple
///
@@ -639,7 +639,7 @@ public:
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, unsigned Res,
bool HasSideEffects);
- /// Build and insert \p Res<def> = G_FPTRUNC \p Op
+ /// Build and insert \p Res = G_FPTRUNC \p Op
///
/// G_FPTRUNC converts a floating-point value into one with a smaller type.
///
@@ -651,7 +651,7 @@ public:
/// \return The newly created instruction.
MachineInstrBuilder buildFPTrunc(unsigned Res, unsigned Op);
- /// Build and insert \p Res<def> = G_TRUNC \p Op
+ /// Build and insert \p Res = G_TRUNC \p Op
///
/// G_TRUNC extracts the low bits of a type. For a vector type each element is
/// truncated independently before being packed into the destination.
@@ -711,7 +711,7 @@ public:
MachineInstrBuilder buildSelect(unsigned Res, unsigned Tst,
unsigned Op0, unsigned Op1);
- /// Build and insert \p Res<def> = G_INSERT_VECTOR_ELT \p Val,
+ /// Build and insert \p Res = G_INSERT_VECTOR_ELT \p Val,
/// \p Elt, \p Idx
///
/// \pre setBasicBlock or setMI must have been called.
@@ -724,7 +724,7 @@ public:
MachineInstrBuilder buildInsertVectorElement(unsigned Res, unsigned Val,
unsigned Elt, unsigned Idx);
- /// Build and insert \p Res<def> = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
+ /// Build and insert \p Res = G_EXTRACT_VECTOR_ELT \p Val, \p Idx
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Res must be a generic virtual register with scalar type.
@@ -735,7 +735,7 @@ public:
MachineInstrBuilder buildExtractVectorElement(unsigned Res, unsigned Val,
unsigned Idx);
- /// Build and insert `OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
+ /// Build and insert `OldValRes = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal,
/// MMO`.
///
/// Atomically replace the value at \p Addr with \p NewVal if it is currently
diff --git a/include/llvm/CodeGen/LivePhysRegs.h b/include/llvm/CodeGen/LivePhysRegs.h
index eb935bb7c06..f9aab0d09e1 100644
--- a/include/llvm/CodeGen/LivePhysRegs.h
+++ b/include/llvm/CodeGen/LivePhysRegs.h
@@ -20,11 +20,11 @@
/// register.
///
/// X86 Example:
-/// %ymm0<def> = ...
-/// %xmm0<def> = ... (Kills %xmm0, all %xmm0s sub-registers, and %ymm0)
+/// %ymm0 = ...
+/// %xmm0 = ... (Kills %xmm0, all %xmm0s sub-registers, and %ymm0)
///
-/// %ymm0<def> = ...
-/// %xmm0<def> = ..., %ymm0<imp-use> (%ymm0 and all its sub-registers are alive)
+/// %ymm0 = ...
+/// %xmm0 = ..., implicit %ymm0 (%ymm0 and all its sub-registers are alive)
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 0730eb763e1..0c9110cbaa8 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -702,8 +702,8 @@ public:
LQR_Unknown ///< Register liveness not decidable from local neighborhood.
};
- /// Return whether (physical) register \p Reg has been <def>ined and not
- /// <kill>ed as of just before \p Before.
+ /// Return whether (physical) register \p Reg has been defined and not
+ /// killed as of just before \p Before.
///
/// Search is localised to a neighborhood of \p Neighborhood instructions
/// before (searching for defs or kills) and \p Neighborhood instructions
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
index 6c899ca7ee0..cf8f5e5540a 100644
--- a/include/llvm/CodeGen/MachineInstr.h
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -44,6 +44,7 @@ class MachineRegisterInfo;
class ModuleSlotTracker;
class raw_ostream;
template <typename T> class SmallVectorImpl;
+class SmallBitVector;
class StringRef;
class TargetInstrInfo;
class TargetRegisterClass;
@@ -1220,6 +1221,15 @@ public:
/// Debugging support
/// @{
+ /// Determine the generic type to be printed (if needed) on uses and defs.
+ LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
+ const MachineRegisterInfo &MRI) const;
+
+ /// Return true when an instruction has a tied register that can't be determined
+ /// by the instruction's descriptor. This is useful for MIR printing, to
+ /// determine whether we need to print the ties or not.
+ bool hasComplexRegisterTies() const;
+
/// Print this MI to \p OS.
/// Only print the defs and the opcode if \p SkipOpers is true.
/// Otherwise, also print operands if \p SkipDebugLoc is true.
diff --git a/include/llvm/CodeGen/MachineInstrBundle.h b/include/llvm/CodeGen/MachineInstrBundle.h
index 995c7001d92..b5341fd1ae4 100644
--- a/include/llvm/CodeGen/MachineInstrBundle.h
+++ b/include/llvm/CodeGen/MachineInstrBundle.h
@@ -150,7 +150,7 @@ public:
///
struct VirtRegInfo {
/// Reads - One of the operands read the virtual register. This does not
- /// include <undef> or <internal> use operands, see MO::readsReg().
+ /// include undef or internal use operands, see MO::readsReg().
bool Reads;
/// Writes - One of the operands writes the virtual register.
diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h
index 64889eb3a2d..757de85f158 100644
--- a/include/llvm/CodeGen/MachineOperand.h
+++ b/include/llvm/CodeGen/MachineOperand.h
@@ -17,6 +17,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
#include <cassert>
namespace llvm {
@@ -116,7 +117,7 @@ private:
/// the same register. In that case, the instruction may depend on those
/// operands reading the same dont-care value. For example:
///
- /// %1<def> = XOR %2<undef>, %2<undef>
+ /// %1 = XOR undef %2, undef %2
///
/// Any register can be used for %2, and its value doesn't matter, but
/// the two operands must be the same register.
@@ -226,11 +227,33 @@ public:
///
void clearParent() { ParentMI = nullptr; }
+ /// Print the MachineOperand to \p os.
+ /// Providing a valid \p TRI and \p IntrinsicInfo results in a more
+ /// target-specific printing. If \p TRI and \p IntrinsicInfo are null, the
+ /// function will try to pick it up from the parent.
void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr,
const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
- void print(raw_ostream &os, ModuleSlotTracker &MST,
- const TargetRegisterInfo *TRI = nullptr,
- const TargetIntrinsicInfo *IntrinsicInfo = nullptr) const;
+
+ /// More complex way of printing a MachineOperand.
+ /// \param TypeToPrint specifies the generic type to be printed on uses and
+ /// defs. It can be determined using MachineInstr::getTypeToPrint.
+ /// \param PrintDef - whether we want to print `def` on an operand which
+ /// isDef. Sometimes, if the operand is printed before '=', we don't print
+ /// `def`.
+ /// \param ShouldPrintRegisterTies - whether we want to print register ties.
+ /// Sometimes they are easily determined by the instruction's descriptor
+ /// (MachineInstr::hasComplexRegisterTies can determine if it's needed).
+ /// \param TiedOperandIdx - if we need to print register ties this needs to
+ /// provide the index of the tied register. If not, it will be ignored.
+ /// \param TRI - provide more target-specific information to the printer.
+ /// Unlike the previous function, this one will not try to get the
+ /// information from its parent.
+ /// \param IntrinsicInfo - same as \p TRI.
+ void print(raw_ostream &os, ModuleSlotTracker &MST, LLT TypeToPrint,
+ bool PrintDef, bool ShouldPrintRegisterTies,
+ unsigned TiedOperandIdx, const TargetRegisterInfo *TRI,
+ const TargetIntrinsicInfo *IntrinsicInfo) const;
+
void dump() const;
//===--------------------------------------------------------------------===//
@@ -831,7 +854,7 @@ template <> struct DenseMapInfo<MachineOperand> {
};
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand &MO) {
- MO.print(OS, nullptr);
+ MO.print(OS);
return OS;
}
diff --git a/include/llvm/CodeGen/TargetInstrInfo.h b/include/llvm/CodeGen/TargetInstrInfo.h
index c4a3865e7f0..2dc918be66d 100644
--- a/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/include/llvm/CodeGen/TargetInstrInfo.h
@@ -547,7 +547,7 @@ public:
/// Represents a predicate at the MachineFunction level. The control flow a
/// MachineBranchPredicate represents is:
///
- /// Reg <def>= LHS `Predicate` RHS == ConditionDef
+ /// Reg = LHS `Predicate` RHS == ConditionDef
/// if Reg then goto TrueDest else goto FalseDest
///
struct MachineBranchPredicate {
@@ -1432,7 +1432,7 @@ public:
/// For example, AVX instructions may copy part of a register operand into
/// the unused high bits of the destination register.
///
- /// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
+ /// vcvtsi2sdq %rax, undef %xmm0, %xmm14
///
/// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
/// false dependence on any previous write to %xmm0.
diff --git a/include/llvm/CodeGen/TargetRegisterInfo.h b/include/llvm/CodeGen/TargetRegisterInfo.h
index cc612a42d75..81907538fb0 100644
--- a/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -1167,6 +1167,11 @@ Printable printRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
/// registers on a \ref raw_ostream.
Printable printVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
+/// \brief Create Printable object to print register classes or register banks
+/// on a \ref raw_ostream.
+Printable printRegClassOrBank(unsigned Reg, const MachineRegisterInfo &RegInfo,
+ const TargetRegisterInfo *TRI);
+
} // end namespace llvm
#endif // LLVM_CODEGEN_TARGETREGISTERINFO_H
diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
index 162e04fe4ce..ffcb9a09ad7 100644
--- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp
+++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp
@@ -448,11 +448,11 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr &MI,
// FIXME: The issue with predicated instruction is more complex. We are being
// conservatively here because the kill markers cannot be trusted after
// if-conversion:
- // %r6<def> = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // %r6 = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
- // STR %r0, %r6<kill>, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
- // %r6<def> = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
- // STR %r0, %r6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ // STR %r0, killed %r6, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
+ // %r6 = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
+ // STR %r0, killed %r6, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
//
// The first R6 kill is not really a kill since it's killed by a predicated
// instruction which may not be executed. The second R6 def may or may not
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 1bc8b4eee0f..f1459d9d0a1 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -815,10 +815,8 @@ static void emitKill(const MachineInstr *MI, AsmPrinter &AP) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &Op = MI->getOperand(i);
assert(Op.isReg() && "KILL instruction must have only register operands");
- OS << ' '
- << printReg(Op.getReg(),
- AP.MF->getSubtarget().getRegisterInfo())
- << (Op.isDef() ? "<def>" : "<kill>");
+ OS << ' ' << (Op.isDef() ? "def " : "killed ")
+ << printReg(Op.getReg(), AP.MF->getSubtarget().getRegisterInfo());
}
AP.OutStreamer->AddComment(OS.str());
AP.OutStreamer->AddBlankLine();
diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp
index 92e73cb502c..99270ff4ea7 100644
--- a/lib/CodeGen/BranchFolding.cpp
+++ b/lib/CodeGen/BranchFolding.cpp
@@ -1968,7 +1968,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
//
// BB2:
// r1 = op2, ...
- // = op3, r1<kill>
+ // = op3, killed r1
IsSafe = false;
break;
}
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 30918a98be0..98e22b24d37 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -170,11 +170,11 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr &MI) {
// FIXME: The issue with predicated instruction is more complex. We are being
// conservative here because the kill markers cannot be trusted after
// if-conversion:
- // %r6<def> = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
+ // %r6 = LDR %sp, %reg0, 92, pred:14, pred:%reg0; mem:LD4[FixedStack14]
// ...
- // STR %r0, %r6<kill>, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
- // %r6<def> = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
- // STR %r0, %r6<kill>, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
+ // STR %r0, killed %r6, %reg0, 0, pred:0, pred:%cpsr; mem:ST4[%395]
+ // %r6 = LDR %sp, %reg0, 100, pred:0, pred:%cpsr; mem:LD4[FixedStack12]
+ // STR %r0, killed %r6, %reg0, 0, pred:14, pred:%reg0; mem:ST4[%396](align=8)
//
// The first R6 kill is not really a kill since it's killed by a predicated
// instruction which may not be executed. The second R6 def may or may not
diff --git a/lib/CodeGen/ExpandPostRAPseudos.cpp b/lib/CodeGen/ExpandPostRAPseudos.cpp
index dc5040471f3..6ef97d6dd5e 100644
--- a/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -104,7 +104,7 @@ bool ExpandPostRA::LowerSubregToReg(MachineInstr *MI) {
if (DstSubReg == InsReg) {
// No need to insert an identity copy instruction.
// Watch out for case like this:
- // %rax<def> = SUBREG_TO_REG 0, %eax<kill>, 3
+ // %rax = SUBREG_TO_REG 0, killed %eax, 3
// We must leave %rax live.
if (DstReg != InsReg) {
MI->setDesc(TII->get(TargetOpcode::KILL));
diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp
index 1962b4ca65d..308b6d293d3 100644
--- a/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/lib/CodeGen/ImplicitNullChecks.cpp
@@ -421,7 +421,7 @@ bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
// test %rcx, %rcx
// je _null_block
// _non_null_block:
- // %rdx<def> = INST
+ // %rdx = INST
// ...
//
// This restriction does not apply to the faulting load inst because in
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index aff6189283e..56f5a0c047c 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -360,7 +360,7 @@ bool InlineSpiller::isSibling(unsigned Reg) {
///
/// x = def
/// spill x
-/// y = use x<kill>
+/// y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index 06807542b34..d181fe83b88 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -700,7 +700,7 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
//
// %eax = COPY %5
// FOO %5 <--- MI, cancel kill because %eax is live.
- // BAR %eax<kill>
+ // BAR killed %eax
//
// There should be no kill flag on FOO when %5 is rewritten as %eax.
for (auto &RUP : RU) {
@@ -721,7 +721,7 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
// Example:
// %1 = ... ; R32: %1
// %2:high16 = ... ; R64: %2
- // = read %2<kill> ; R64: %2
+ // = read killed %2 ; R64: %2
// = read %1 ; R32: %1
// The <kill> flag is correct for %2, but the register allocator may
// assign R0L to %1, and R0 to %2 because the low 32bits of R0
diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp
index f9c5652e8a1..1c2bbc3df02 100644
--- a/lib/CodeGen/LiveVariables.cpp
+++ b/lib/CodeGen/LiveVariables.cpp
@@ -235,7 +235,7 @@ void LiveVariables::HandlePhysRegUse(unsigned Reg, MachineInstr &MI) {
// Otherwise, the last sub-register def implicitly defines this register.
// e.g.
// AH =
- // AL = ... <imp-def EAX>, <imp-kill AH>
+ // AL = ... implicit-def EAX, implicit killed AH
// = AH
// ...
// = EAX
@@ -321,17 +321,17 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
// AH =
//
// = AX
- // = AL, AX<imp-use, kill>
+ // = AL, implicit killed AX
// AX =
//
// Or whole register is defined, but not used at all.
- // AX<dead> =
+ // dead AX =
// ...
// AX =
//
// Or whole register is defined, but only partly used.
- // AX<dead> = AL<imp-def>
- // = AL<kill>
+ // dead AX = implicit-def AL
+ // = killed AL
// AX =
MachineInstr *LastPartDef = nullptr;
unsigned LastPartDefDist = 0;
@@ -364,7 +364,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
if (!PhysRegUse[Reg]) {
// Partial uses. Mark register def dead and add implicit def of
// sub-registers which are used.
- // EAX<dead> = op AL<imp-def>
+ // dead EAX = op implicit-def AL
// That is, EAX def is dead but AL def extends pass it.
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index aa0f38036b1..e8a358e5209 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -164,7 +164,7 @@ public:
void printTargetFlags(const MachineOperand &Op);
void print(const MachineInstr &MI, unsigned OpIdx,
const TargetRegisterInfo *TRI, bool ShouldPrintRegisterTies,
- LLT TypeToPrint, bool IsDef = false);
+ LLT TypeToPrint, bool PrintDef = true);
void print(const LLVMContext &Context, const TargetInstrInfo &TII,
const MachineMemOperand &Op);
void printSyncScope(const LLVMContext &Context, SyncScope::ID SSID);
@@ -257,25 +257,11 @@ static void printCustomRegMask(const uint32_t *RegMask, raw_ostream &OS,
OS << ')';
}
-static void printRegClassOrBank(unsigned Reg, raw_ostream &OS,
- const MachineRegisterInfo &RegInfo,
- const TargetRegisterInfo *TRI) {
- if (RegInfo.getRegClassOrNull(Reg))
- OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
- else if (RegInfo.getRegBankOrNull(Reg))
- OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
- else {
- OS << "_";
- assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
- "Generic registers must have a valid type");
- }
-}
-
static void printRegClassOrBank(unsigned Reg, yaml::StringValue &Dest,
const MachineRegisterInfo &RegInfo,
const TargetRegisterInfo *TRI) {
raw_string_ostream OS(Dest.Value);
- printRegClassOrBank(Reg, OS, RegInfo, TRI);
+ OS << printRegClassOrBank(Reg, RegInfo, TRI);
}
@@ -289,7 +275,7 @@ void MIRPrinter::convert(yaml::MachineFunction &MF,
unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
yaml::VirtualRegisterDefinition VReg;
VReg.ID = I;
- printRegClassOrBank(Reg, VReg.Class, RegInfo, TRI);
+ ::printRegClassOrBank(Reg, VReg.Class, RegInfo, TRI);
unsigned PreferredReg = RegInfo.getSimpleHint(Reg);
if (PreferredReg)
printRegMIR(PreferredReg, VReg.PreferredRegister, TRI);
@@ -661,44 +647,6 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
OS.indent(2) << "}\n";
}
-/// Return true when an instruction has tied register that can't be determined
-/// by the instruction's descriptor.
-static bool hasComplexRegisterTies(const MachineInstr &MI) {
- const MCInstrDesc &MCID = MI.getDesc();
- for (unsigned I = 0, E = MI.getNumOperands(); I < E; ++I) {
- const auto &Operand = MI.getOperand(I);
- if (!Operand.isReg() || Operand.isDef())
- // Ignore the defined registers as MCID marks only the uses as tied.
- continue;
- int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
- int TiedIdx = Operand.isTied() ? int(MI.findTiedOperandIdx(I)) : -1;
- if (ExpectedTiedIdx != TiedIdx)
- return true;
- }
- return false;
-}
-
-static LLT getTypeToPrint(const MachineInstr &MI, unsigned OpIdx,
- SmallBitVector &PrintedTypes,
- const MachineRegisterInfo &MRI) {
- const MachineOperand &Op = MI.getOperand(OpIdx);
- if (!Op.isReg())
- return LLT{};
-
- if (MI.isVariadic() || OpIdx >= MI.getNumExplicitOperands())
- return MRI.getType(Op.getReg());
-
- auto &OpInfo = MI.getDesc().OpInfo[OpIdx];
- if (!OpInfo.isGenericType())
- return MRI.getType(Op.getReg());
-
- if (PrintedTypes[OpInfo.getGenericTypeIndex()])
- return LLT{};
-
- PrintedTypes.set(OpInfo.getGenericTypeIndex());
- return MRI.getType(Op.getReg());
-}
-
void MIPrinter::print(const MachineInstr &MI) {
const auto *MF = MI.getMF();
const auto &MRI = MF->getRegInfo();
@@ -711,7 +659,7 @@ void MIPrinter::print(const MachineInstr &MI) {
assert(MI.getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
SmallBitVector PrintedTypes(8);
- bool ShouldPrintRegisterTies = hasComplexRegisterTies(MI);
+ bool ShouldPrintRegisterTies = MI.hasComplexRegisterTies();
unsigned I = 0, E = MI.getNumOperands();
for (; I < E && MI.getOperand(I).isReg() && MI.getOperand(I).isDef() &&
!MI.getOperand(I).isImplicit();
@@ -719,8 +667,8 @@ void MIPrinter::print(const MachineInstr &MI) {
if (I)
OS << ", ";
print(MI, I, TRI, ShouldPrintRegisterTies,
- getTypeToPrint(MI, I, PrintedTypes, MRI),
- /*IsDef=*/true);
+ MI.getTypeToPrint(I, PrintedTypes, MRI),
+ /*PrintDef=*/false);
}
if (I)
@@ -736,7 +684,7 @@ void MIPrinter::print(const MachineInstr &MI) {
if (NeedComma)
OS << ", ";
print(MI, I, TRI, ShouldPrintRegisterTies,
- getTypeToPrint(MI, I, PrintedTypes, MRI));
+ MI.getTypeToPrint(I, PrintedTypes, MRI));
NeedComma = true;
}
@@ -902,44 +850,17 @@ static const char *getTargetIndexName(const MachineFunction &MF, int Index) {
void MIPrinter::print(const MachineInstr &MI, unsigned OpIdx,
const TargetRegisterInfo *TRI,
bool ShouldPrintRegisterTies, LLT TypeToPrint,
- bool IsDef) {
+ bool PrintDef) {
const MachineOperand &Op = MI.getOperand(OpIdx);
printTargetFlags(Op);
switch (Op.getType()) {
case MachineOperand::MO_Register: {
- unsigned Reg = Op.getReg();
- if (Op.isImplicit())
- OS << (Op.isDef() ? "implicit-def " : "implicit ");
- else if (!IsDef && Op.isDef())
- // Print the 'def' flag only when the operand is defined after '='.
- OS << "def ";
- if (Op.isInternalRead())
- OS << "internal ";
- if (Op.isDead())
- OS << "dead ";
- if (Op.isKill())
- OS << "killed ";
- if (Op.isUndef())
- OS << "undef ";
- if (Op.isEarlyClobber())
- OS << "early-clobber ";
- if (Op.isDebug())
- OS << "debug-use ";
- OS << printReg(Reg, TRI);
- // Print the sub register.
- if (Op.getSubReg() != 0)
- OS << '.' << TRI->getSubRegIndexName(Op.getSubReg());
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- const MachineRegisterInfo &MRI = Op.getParent()->getMF()->getRegInfo();
- if (IsDef || MRI.def_empty(Reg)) {
- OS << ':';
- printRegClassOrBank(Reg, OS, MRI, TRI);
- }
- }
+ unsigned TiedOperandIdx = 0;
if (ShouldPrintRegisterTies && Op.isTied() && !Op.isDef())
- OS << "(tied-def " << Op.getParent()->findTiedOperandIdx(OpIdx) << ")";
- if (TypeToPrint.isValid())
- OS << '(' << TypeToPrint << ')';
+ TiedOperandIdx = Op.getParent()->findTiedOperandIdx(OpIdx);
+ const TargetIntrinsicInfo *TII = MI.getMF()->getTarget().getIntrinsicInfo();
+ Op.print(OS, MST, TypeToPrint, PrintDef, ShouldPrintRegisterTies,
+ TiedOperandIdx, TRI, TII);
break;
}
case MachineOperand::MO_Immediate:
diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index d26d53d87ca..da63b41858e 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -623,10 +623,10 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
// Go through implicit defs of CSMI and MI, and clear the kill flags on
// their uses in all the instructions between CSMI and MI.
// We might have made some of the kill flags redundant, consider:
- // subs ... %nzcv<imp-def> <- CSMI
- // csinc ... %nzcv<imp-use,kill> <- this kill flag isn't valid anymore
- // subs ... %nzcv<imp-def> <- MI, to be eliminated
- // csinc ... %nzcv<imp-use,kill>
+ // subs ... implicit-def %nzcv <- CSMI
+ // csinc ... implicit killed %nzcv <- this kill flag isn't valid anymore
+ // subs ... implicit-def %nzcv <- MI, to be eliminated
+ // csinc ... implicit killed %nzcv
// Since we eliminated MI, and reused a register imp-def'd by CSMI
// (here %nzcv), that register, if it was killed before MI, should have
// that kill flag removed, because it's lifetime was extended.
diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp
index 1590b205def..8b4f9970b22 100644
--- a/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/lib/CodeGen/MachineCopyPropagation.cpp
@@ -226,19 +226,19 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// The two copies cancel out and the source of the first copy
// hasn't been overridden, eliminate the second one. e.g.
- // %ecx<def> = COPY %eax
+ // %ecx = COPY %eax
// ... nothing clobbered eax.
- // %eax<def> = COPY %ecx
+ // %eax = COPY %ecx
// =>
- // %ecx<def> = COPY %eax
+ // %ecx = COPY %eax
//
// or
//
- // %ecx<def> = COPY %eax
+ // %ecx = COPY %eax
// ... nothing clobbered eax.
- // %ecx<def> = COPY %eax
+ // %ecx = COPY %eax
// =>
- // %ecx<def> = COPY %eax
+ // %ecx = COPY %eax
if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
continue;
@@ -262,11 +262,11 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// If 'Def' is previously source of another copy, then this earlier copy's
// source is no longer available. e.g.
- // %xmm9<def> = copy %xmm2
+ // %xmm9 = copy %xmm2
// ...
- // %xmm2<def> = copy %xmm0
+ // %xmm2 = copy %xmm0
// ...
- // %xmm2<def> = copy %xmm9
+ // %xmm2 = copy %xmm9
ClobberRegister(Def);
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.isDef())
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index 8bdc183fabb..464df33e6be 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
@@ -742,7 +743,7 @@ MachineInstr::readsWritesVirtualRegister(unsigned Reg,
if (MO.isUse())
Use |= !MO.isUndef();
else if (MO.getSubReg() && !MO.isUndef())
- // A partial <def,undef> doesn't count as reading the register.
+ // A partial def undef doesn't count as reading the register.
PartDef = true;
else
FullDef = true;
@@ -1163,6 +1164,41 @@ void MachineInstr::copyImplicitOps(MachineFunction &MF,
}
}
+bool MachineInstr::hasComplexRegisterTies() const {
+ const MCInstrDesc &MCID = getDesc();
+ for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
+ const auto &Operand = getOperand(I);
+ if (!Operand.isReg() || Operand.isDef())
+ // Ignore the defined registers as MCID marks only the uses as tied.
+ continue;
+ int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
+ int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
+ if (ExpectedTiedIdx != TiedIdx)
+ return true;
+ }
+ return false;
+}
+
+LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
+ const MachineRegisterInfo &MRI) const {
+ const MachineOperand &Op = getOperand(OpIdx);
+ if (!Op.isReg())
+ return LLT{};
+
+ if (isVariadic() || OpIdx >= getNumExplicitOperands())
+ return MRI.getType(Op.getReg());
+
+ auto &OpInfo = getDesc().OpInfo[OpIdx];
+ if (!OpInfo.isGenericType())
+ return MRI.getType(Op.getReg());
+
+ if (PrintedTypes[OpInfo.getGenericTypeIndex()])
+ return LLT{};
+
+ PrintedTypes.set(OpInfo.getGenericTypeIndex());
+ return MRI.getType(Op.getReg());
+}
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineInstr::dump() const {
dbgs() << " ";
@@ -1204,21 +1240,31 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
// Save a list of virtual registers.
SmallVector<unsigned, 8> VirtRegs;
+ SmallBitVector PrintedTypes(8);
+ bool ShouldPrintRegisterTies = hasComplexRegisterTies();
+ auto getTiedOperandIdx = [&](unsigned OpIdx) {
+ if (!ShouldPrintRegisterTies)
+ return 0U;
+ const MachineOperand &MO = getOperand(OpIdx);
+ if (MO.isReg() && MO.isTied() && !MO.isDef())
+ return findTiedOperandIdx(OpIdx);
+ return 0U;
+ };
// Print explicitly defined operands on the left of an assignment syntax.
unsigned StartOp = 0, e = getNumOperands();
for (; StartOp < e && getOperand(StartOp).isReg() &&
- getOperand(StartOp).isDef() &&
- !getOperand(StartOp).isImplicit();
+ getOperand(StartOp).isDef() && !getOperand(StartOp).isImplicit();
++StartOp) {
- if (StartOp != 0) OS << ", ";
- getOperand(StartOp).print(OS, MST, TRI, IntrinsicInfo);
+ if (StartOp != 0)
+ OS << ", ";
+ LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
+ unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
+ getOperand(StartOp).print(OS, MST, TypeToPrint, /*PrintDef=*/false,
+ ShouldPrintRegisterTies, TiedOperandIdx, TRI,
+ IntrinsicInfo);
unsigned Reg = getOperand(StartOp).getReg();
- if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
VirtRegs.push_back(Reg);
- LLT Ty = MRI ? MRI->getType(Reg) : LLT{};
- if (Ty.isValid())
- OS << '(' << Ty << ')';
- }
}
if (StartOp != 0)
@@ -1241,7 +1287,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
// Print asm string.
OS << " ";
- getOperand(InlineAsm::MIOp_AsmString).print(OS, MST, TRI);
+ const unsigned OpIdx = InlineAsm::MIOp_AsmString;
+ LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
+ unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
+ getOperand(OpIdx).print(OS, MST, TypeToPrint, /*PrintDef=*/true,
+ ShouldPrintRegisterTies, TiedOperandIdx, TRI,
+ IntrinsicInfo);
// Print HasSideEffects, MayLoad, MayStore, IsAlignStack
unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
@@ -1284,8 +1335,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
if (DIV && !DIV->getName().empty())
OS << "!\"" << DIV->getName() << '\"';
- else
- MO.print(OS, MST, TRI);
+ else {
+ LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
+ unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
+ MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true,
+ ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
+ }
} else if (TRI && (isInsertSubreg() || isRegSequence() ||
(isSubregToReg() && i == 3)) && MO.isImm()) {
OS << TRI->getSubRegIndexName(MO.getImm());
@@ -1347,8 +1402,12 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
// Compute the index of the next operand descriptor.
AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag);
- } else
- MO.print(OS, MST, TRI);
+ } else {
+ LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
+ unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
+ MO.print(OS, MST, TypeToPrint, /*PrintDef=*/true, ShouldPrintRegisterTies,
+ TiedOperandIdx, TRI, IntrinsicInfo);
+ }
}
bool HaveSemi = false;
diff --git a/lib/CodeGen/MachineOperand.cpp b/lib/CodeGen/MachineOperand.cpp
index def4d682dec..f5857db8ada 100644
--- a/lib/CodeGen/MachineOperand.cpp
+++ b/lib/CodeGen/MachineOperand.cpp
@@ -15,10 +15,11 @@
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/ModuleSlotTracker.h"
+#include "llvm/Target/TargetIntrinsicInfo.h"
+#include "llvm/Target/TargetMachine.h"
using namespace llvm;
@@ -333,75 +334,86 @@ hash_code llvm::hash_value(const MachineOperand &MO) {
llvm_unreachable("Invalid machine operand type");
}
+// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
+// it.
+static void tryToGetTargetInfo(const MachineOperand &MO,
+ const TargetRegisterInfo *&TRI,
+ const TargetIntrinsicInfo *&IntrinsicInfo) {
+ if (const MachineInstr *MI = MO.getParent()) {
+ if (const MachineBasicBlock *MBB = MI->getParent()) {
+ if (const MachineFunction *MF = MBB->getParent()) {
+ TRI = MF->getSubtarget().getRegisterInfo();
+ IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
+ }
+ }
+ }
+}
+
void MachineOperand::print(raw_ostream &OS, const TargetRegisterInfo *TRI,
const TargetIntrinsicInfo *IntrinsicInfo) const {
+ tryToGetTargetInfo(*this, TRI, IntrinsicInfo);
ModuleSlotTracker DummyMST(nullptr);
- print(OS, DummyMST, TRI, IntrinsicInfo);
+ print(OS, DummyMST, LLT{}, /*PrintDef=*/false,
+ /*ShouldPrintRegisterTies=*/true,
+ /*TiedOperandIdx=*/0, TRI, IntrinsicInfo);
}
void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
+ LLT TypeToPrint, bool PrintDef,
+ bool ShouldPrintRegisterTies,
+ unsigned TiedOperandIdx,
const TargetRegisterInfo *TRI,
const TargetIntrinsicInfo *IntrinsicInfo) const {
switch (getType()) {
- case MachineOperand::MO_Register:
- OS << printReg(getReg(), TRI, getSubReg());
-
- if (isDef() || isKill() || isDead() || isImplicit() || isUndef() ||
- isInternalRead() || isEarlyClobber() || isTied()) {
- OS << '<';
- bool NeedComma = false;
- if (isDef()) {
- if (NeedComma)
- OS << ',';
- if (isEarlyClobber())
- OS << "earlyclobber,";
- if (isImplicit())
- OS << "imp-";
- OS << "def";
- NeedComma = true;
- // <def,read-undef> only makes sense when getSubReg() is set.
- // Don't clutter the output otherwise.
- if (isUndef() && getSubReg())
- OS << ",read-undef";
- } else if (isImplicit()) {
- OS << "imp-use";
- NeedComma = true;
- }
-
- if (isKill()) {
- if (NeedComma)
- OS << ',';
- OS << "kill";
- NeedComma = true;
- }
- if (isDead()) {
- if (NeedComma)
- OS << ',';
- OS << "dead";
- NeedComma = true;
- }
- if (isUndef() && isUse()) {
- if (NeedComma)
- OS << ',';
- OS << "undef";
- NeedComma = true;
- }
- if (isInternalRead()) {
- if (NeedComma)
- OS << ',';
- OS << "internal";
- NeedComma = true;
- }
- if (isTied()) {
- if (NeedComma)
- OS << ',';
- OS << "tied";
- if (TiedTo != 15)
- OS << unsigned(TiedTo - 1);
+ case MachineOperand::MO_Register: {
+ unsigned Reg = getReg();
+ if (isImplicit())
+ OS << (isDef() ? "implicit-def " : "implicit ");
+ else if (PrintDef && isDef())
+ // Print the 'def' flag only when the operand is defined after '='.
+ OS << "def ";
+ if (isInternalRead())
+ OS << "internal ";
+ if (isDead())
+ OS << "dead ";
+ if (isKill())
+ OS << "killed ";
+ if (isUndef())
+ OS << "undef ";
+ if (isEarlyClobber())
+ OS << "early-clobber ";
+ if (isDebug())
+ OS << "debug-use ";
+ OS << printReg(Reg, TRI);
+ // Print the sub register.
+ if (unsigned SubReg = getSubReg()) {
+ if (TRI)
+ OS << '.' << TRI->getSubRegIndexName(SubReg);
+ else
+ OS << ".subreg" << SubReg;
+ }
+ // Print the register class / bank.
+ if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+ if (const MachineInstr *MI = getParent()) {
+ if (const MachineBasicBlock *MBB = MI->getParent()) {
+ if (const MachineFunction *MF = MBB->getParent()) {
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+ if (!PrintDef || MRI.def_empty(Reg)) {
+ OS << ':';
+ OS << printRegClassOrBank(Reg, MRI, TRI);
+ }
+ }
+ }
}
- OS << '>';
}
+ // Print ties.
+ if (ShouldPrintRegisterTies && isTied() && !isDef())
+ OS << "(tied-def " << TiedOperandIdx << ")";
+ // Print types.
+ if (TypeToPrint.isValid())
+ OS << '(' << TypeToPrint << ')';
break;
+ }
case MachineOperand::MO_Immediate:
OS << getImm();
break;
@@ -475,23 +487,27 @@ void MachineOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << '>';
break;
case MachineOperand::MO_RegisterMask: {
- unsigned NumRegsInMask = 0;
- unsigned NumRegsEmitted = 0;
OS << "<regmask";
- for (unsigned i = 0; i < TRI->getNumRegs(); ++i) {
- unsigned MaskWord = i / 32;
- unsigned MaskBit = i % 32;
- if (getRegMask()[MaskWord] & (1 << MaskBit)) {
- if (PrintRegMaskNumRegs < 0 ||
- NumRegsEmitted <= static_cast<unsigned>(PrintRegMaskNumRegs)) {
- OS << " " << printReg(i, TRI);
- NumRegsEmitted++;
+ if (TRI) {
+ unsigned NumRegsInMask = 0;
+ unsigned NumRegsEmitted = 0;
+ for (unsigned i = 0; i < TRI->getNumRegs(); ++i) {
+ unsigned MaskWord = i / 32;
+ unsigned MaskBit = i % 32;
+ if (getRegMask()[MaskWord] & (1 << MaskBit)) {
+ if (PrintRegMaskNumRegs < 0 ||
+ NumRegsEmitted <= static_cast<unsigned>(PrintRegMaskNumRegs)) {
+ OS << " " << printReg(i, TRI);
+ NumRegsEmitted++;
+ }
+ NumRegsInMask++;
}
- NumRegsInMask++;
}
+ if (NumRegsEmitted != NumRegsInMask)
+ OS << " and " << (NumRegsInMask - NumRegsEmitted) << " more...";
+ } else {
+ OS << " ...";
}
- if (NumRegsEmitted != NumRegsInMask)
- OS << " and " << (NumRegsInMask - NumRegsEmitted) << " more...";
OS << ">";
break;
}
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index 7857084c4e6..f932df71cdb 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -246,14 +246,14 @@ MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
// %bb.1: derived from LLVM BB %bb4.preheader
// Predecessors according to CFG: %bb.0
// ...
- // %reg16385<def> = DEC64_32r %reg16437, %eflags<imp-def,dead>
+ // %reg16385 = DEC64_32r %reg16437, implicit-def dead %eflags
// ...
- // JE_4 <%bb.37>, %eflags<imp-use>
+ // JE_4 <%bb.37>, implicit %eflags
// Successors according to CFG: %bb.37 %bb.2
//
// %bb.2: derived from LLVM BB %bb.nph
// Predecessors according to CFG: %bb.0 %bb.1
- // %reg16386<def> = PHI %reg16434, %bb.0, %reg16385, %bb.1
+ // %reg16386 = PHI %reg16434, %bb.0, %reg16385, %bb.1
BreakPHIEdge = true;
for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
MachineInstr *UseInst = MO.getParent();
diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp
index 6834059234e..d5658db161a 100644
--- a/lib/CodeGen/MachineVerifier.cpp
+++ b/lib/CodeGen/MachineVerifier.cpp
@@ -1961,7 +1961,7 @@ void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
if (MOI->isDef()) {
if (Sub != 0) {
hasSubRegDef = true;
- // An operand %0:sub0<def> reads %0:sub1..n. Invert the lane
+ // An operand %0:sub0 reads %0:sub1..n. Invert the lane
// mask for subregister defs. Read-undef defs will be handled by
// readsReg below.
SLM = ~SLM;
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index f26f43d79f2..97011d55d89 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -272,7 +272,7 @@ void RegAllocFast::addKillFlag(const LiveReg &LR) {
// subreg of this register and given we don't track which
// lanes are actually dead, we cannot insert a kill flag here.
// Otherwise we may end up in a situation like this:
- // ... = (MO) physreg:sub1, physreg <implicit-use, kill>
+ // ... = (MO) physreg:sub1, implicit killed physreg
// ... <== Here we would allow later pass to reuse physreg:sub1
// which is potentially wrong.
// LR:sub0 = ...
@@ -675,7 +675,7 @@ RegAllocFast::LiveRegMap::iterator RegAllocFast::reloadVirtReg(MachineInstr &MI,
} else if (MO.isKill()) {
// We must remove kill flags from uses of reloaded registers because the
// register would be killed immediately, and there might be a second use:
- // %foo = OR %x<kill>, %x
+ // %foo = OR killed %x, %x
// This would cause a second reload of %x into a different register.
DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n");
MO.setIsKill(false);
diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index 09875d336fd..685271baa4a 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -667,7 +667,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// its other operand is coalesced to the copy dest register, see if we can
// transform the copy into a noop by commuting the definition. For example,
//
- // A3 = op A2 B0<kill>
+ // A3 = op A2 killed B0
// ...
// B1 = A3 <- this copy
// ...
@@ -675,7 +675,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
//
// ==>
//
- // B2 = op B0 A2<kill>
+ // B2 = op B0 killed A2
// ...
// B1 = B2 <- now an identity copy
// ...
@@ -768,7 +768,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
// ...
// B = A
// ...
- // C = A<kill>
+ // C = killed A
// ...
// = B
@@ -1254,7 +1254,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// Make sure that the subrange for resultant undef is removed
// For example:
// %1:sub1<def,read-undef> = LOAD CONSTANT 1
- // %2<def> = COPY %1
+ // %2 = COPY %1
// ==>
// %2:sub1<def, read-undef> = LOAD CONSTANT 1
// ; Correct but need to remove the subrange for %2:sub0
@@ -1297,7 +1297,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
// = somedef %1 ; %1 GR8
// =>
// %1 = somedef ; %1 GR8
- // ECX<def, dead> = remat ; CL<imp-def>
+ // dead ECX = remat ; implicit-def CL
// = somedef %1 ; %1 GR8
// %1 will see the inteferences with CL but not with CH since
// no live-ranges would have been created for ECX.
@@ -1352,7 +1352,7 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI) {
// ProcessImpicitDefs may leave some copies of <undef> values, it only removes
// local variables. When we have a copy like:
//
- // %1 = COPY %2<undef>
+ // %1 = COPY undef %2
//
// We delete the copy and remove the corresponding value number from %1.
// Any uses of that value number are marked as <undef>.
@@ -1927,7 +1927,7 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
//
// %dst:ssub0<def,read-undef> = FOO
// %src = BAR
-// %dst:ssub1<def> = COPY %src
+// %dst:ssub1 = COPY %src
//
// The live range of %src overlaps the %dst value defined by FOO, but
// merging %src into %dst:ssub1 is only going to clobber the ssub1 lane
@@ -1942,9 +1942,9 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
// is live, but never read. This can happen because we don't compute
// individual live ranges per lane.
//
-// %dst<def> = FOO
+// %dst = FOO
// %src = BAR
-// %dst:ssub1<def> = COPY %src
+// %dst:ssub1 = COPY %src
//
// This kind of interference is only resolved locally. If the clobbered
// lane value escapes the block, the join is aborted.
@@ -2287,7 +2287,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
//
// This adds ssub1 to the set of valid lanes in %src:
//
- // %src:ssub1<def> = FOO
+ // %src:ssub1 = FOO
//
// This leaves only ssub1 valid, making any other lanes undef:
//
@@ -2425,9 +2425,9 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
//
// 1 %dst:ssub0 = FOO <-- OtherVNI
// 2 %src = BAR <-- VNI
- // 3 %dst:ssub1 = COPY %src<kill> <-- Eliminate this copy.
- // 4 BAZ %dst<kill>
- // 5 QUUX %src<kill>
+ // 3 %dst:ssub1 = COPY killed %src <-- Eliminate this copy.
+ // 4 BAZ killed %dst
+ // 5 QUUX killed %src
//
// Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace
// handles this complex value mapping.
@@ -2437,7 +2437,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
// If the other live range is killed by DefMI and the live ranges are still
// overlapping, it must be because we're looking at an early clobber def:
//
- // %dst<def,early-clobber> = ASM %src<kill>
+ // %dst<def,early-clobber> = ASM killed %src
//
// In this case, it is illegal to merge the two live ranges since the early
// clobber def would clobber %src before it was read.
@@ -2682,7 +2682,7 @@ void JoinVals::pruneValues(JoinVals &Other,
if (!Def.isBlock()) {
if (changeInstrs) {
// Remove <def,read-undef> flags. This def is now a partial redef.
- // Also remove <def,dead> flags since the joined live range will
+ // Also remove dead flags since the joined live range will
// continue past this instruction.
for (MachineOperand &MO :
Indexes->getInstructionFromIndex(Def)->operands()) {
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index 5aeec854dad..97967124add 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -213,7 +213,7 @@ void RegScavenger::forward() {
continue;
if (!isRegUsed(Reg)) {
// Check if it's partial live: e.g.
- // D0 = insert_subreg D0<undef>, S0
+ // D0 = insert_subreg undef D0, S0
// ... D0
// The problem is the insert_subreg could be eliminated. The use of
// D0 is using a partially undef value. This is not *incorrect* since
diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp
index fc85ea3d166..dade05ce515 100644
--- a/lib/CodeGen/SplitKit.cpp
+++ b/lib/CodeGen/SplitKit.cpp
@@ -1379,7 +1379,7 @@ void SplitEditor::rewriteAssigned(bool ExtendRanges) {
// for a partially defined original register. For example:
// %0:subreg_hireg<def,read-undef> = ...
// ...
- // %1<def> = COPY %0
+ // %1 = COPY %0
if (S.empty())
continue;
SubLRC.reset(&VRM.getMachineFunction(), LIS.getSlotIndexes(), &MDT,
diff --git a/lib/CodeGen/TargetRegisterInfo.cpp b/lib/CodeGen/TargetRegisterInfo.cpp
index dfda313f233..f255ba4fef9 100644
--- a/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/lib/CodeGen/TargetRegisterInfo.cpp
@@ -144,6 +144,21 @@ Printable printVRegOrUnit(unsigned Unit, const TargetRegisterInfo *TRI) {
});
}
+Printable printRegClassOrBank(unsigned Reg, const MachineRegisterInfo &RegInfo,
+ const TargetRegisterInfo *TRI) {
+ return Printable([Reg, &RegInfo, TRI](raw_ostream &OS) {
+ if (RegInfo.getRegClassOrNull(Reg))
+ OS << StringRef(TRI->getRegClassName(RegInfo.getRegClass(Reg))).lower();
+ else if (RegInfo.getRegBankOrNull(Reg))
+ OS << StringRef(RegInfo.getRegBankOrNull(Reg)->getName()).lower();
+ else {
+ OS << "_";
+ assert((RegInfo.def_empty(Reg) || RegInfo.getType(Reg).isValid()) &&
+ "Generic registers must have a valid type");
+ }
+ });
+}
+
} // end namespace llvm
/// getAllocatableClass - Return the maximal subclass of the given register
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index c51340766b7..cd4391232c1 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -458,8 +458,8 @@ static bool isPlainlyKilled(MachineInstr *MI, unsigned Reg,
/// For example, in this code:
///
/// %reg1034 = copy %reg1024
-/// %reg1035 = copy %reg1025<kill>
-/// %reg1036 = add %reg1034<kill>, %reg1035<kill>
+/// %reg1035 = copy killed %reg1025
+/// %reg1036 = add killed %reg1034, killed %reg1035
///
/// %reg1034 is not considered to be killed, since it is copied from a
/// register which is not killed. Treating it as not killed lets the
@@ -591,31 +591,31 @@ isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC,
// general, we want no uses between this instruction and the definition of
// the two-address register.
// e.g.
- // %reg1028<def> = EXTRACT_SUBREG %reg1027<kill>, 1
- // %reg1029<def> = MOV8rr %reg1028
- // %reg1029<def> = SHR8ri %reg1029, 7, %eflags<imp-def,dead>
- // insert => %reg1030<def> = MOV8rr %reg1028
- // %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %eflags<imp-def,dead>
+ // %reg1028 = EXTRACT_SUBREG killed %reg1027, 1
+ // %reg1029 = MOV8rr %reg1028
+ // %reg1029 = SHR8ri %reg1029, 7, implicit dead %eflags
+ // insert => %reg1030 = MOV8rr %reg1028
+ // %reg1030 = ADD8rr killed %reg1028, killed %reg1029, implicit dead %eflags
// In this case, it might not be possible to coalesce the second MOV8rr
// instruction if the first one is coalesced. So it would be profitable to
// commute it:
- // %reg1028<def> = EXTRACT_SUBREG %reg1027<kill>, 1
- // %reg1029<def> = MOV8rr %reg1028
- // %reg1029<def> = SHR8ri %reg1029, 7, %eflags<imp-def,dead>
- // insert => %reg1030<def> = MOV8rr %reg1029
- // %reg1030<def> = ADD8rr %reg1029<kill>, %reg1028<kill>, %eflags<imp-def,dead>
+ // %reg1028 = EXTRACT_SUBREG killed %reg1027, 1
+ // %reg1029 = MOV8rr %reg1028
+ // %reg1029 = SHR8ri %reg1029, 7, implicit dead %eflags
+ // insert => %reg1030 = MOV8rr %reg1029
+ // %reg1030 = ADD8rr killed %reg1029, killed %reg1028, implicit dead %eflags
if (!isPlainlyKilled(MI, regC, LIS))
return false;
// Ok, we have something like:
- // %reg1030<def> = ADD8rr %reg1028<kill>, %reg1029<kill>, %eflags<imp-def,dead>
+ // %reg1030 = ADD8rr killed %reg1028, killed %reg1029, implicit dead %eflags
// let's see if it's worth commuting it.
// Look for situations like this:
- // %reg1024<def> = MOV r1
- // %reg1025<def> = MOV r0
- // %reg1026<def> = ADD %reg1024, %reg1025
+ // %reg1024 = MOV r1
+ // %reg1025 = MOV r0
+ // %reg1026 = ADD %reg1024, %reg1025
// r0 = MOV %reg1026
// Commute the ADD to hopefully eliminate an otherwise unavoidable copy.
unsigned ToRegA = getMappedReg(regA, DstRegMap);
@@ -713,9 +713,9 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI,
bool
TwoAddressInstructionPass::isProfitableToConv3Addr(unsigned RegA,unsigned RegB){
// Look for situations like this:
- // %reg1024<def> = MOV r1
- // %reg1025<def> = MOV r0
- // %reg1026<def> = ADD %reg1024, %reg1025
+ // %reg1024 = MOV r1
+ // %reg1025 = MOV r0
+ // %reg1026 = ADD %reg1024, %reg1025
// r2 = MOV %reg1026
// Turn ADD into a 3-address instruction to avoid a copy.
unsigned FromRegB = getMappedReg(RegB, SrcRegMap);
@@ -1466,7 +1466,7 @@ collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) {
assert(SrcReg && SrcMO.isUse() && "two address instruction invalid");
- // Deal with <undef> uses immediately - simply rewrite the src operand.
+ // Deal with undef uses immediately - simply rewrite the src operand.
if (SrcMO.isUndef() && !DstMO.getSubReg()) {
// Constrain the DstReg register class if required.
if (TargetRegisterInfo::isVirtualRegister(DstReg))
@@ -1778,8 +1778,8 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
///
/// Becomes:
///
-/// %dst:ssub0<def,undef> = COPY %v1
-/// %dst:ssub1<def> = COPY %v2
+/// undef %dst:ssub0 = COPY %v1
+/// %dst:ssub1 = COPY %v2
void TwoAddressInstructionPass::
eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
MachineInstr &MI = *MBBI;
@@ -1803,7 +1803,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
MachineOperand &UseMO = MI.getOperand(i);
unsigned SrcReg = UseMO.getReg();
unsigned SubIdx = MI.getOperand(i+1).getImm();
- // Nothing needs to be inserted for <undef> operands.
+ // Nothing needs to be inserted for undef operands.
if (UseMO.isUndef())
continue;
@@ -1825,7 +1825,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
.addReg(DstReg, RegState::Define, SubIdx)
.add(UseMO);
- // The first def needs an <undef> flag because there is no live register
+ // The first def needs an undef flag because there is no live register
// before it.
if (!DefEmitted) {
CopyMI->getOperand(0).setIsUndef(true);
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index df950b5d317..6e5674bb8bc 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -380,8 +380,8 @@ void VirtRegRewriter::handleIdentityCopy(MachineInstr &MI) const {
++NumIdCopies;
// Copies like:
- // %r0 = COPY %r0<undef>
- // %al = COPY %al, %eax<imp-def>
+ // %r0 = COPY undef %r0
+ // %al = COPY %al, implicit-def %eax
// give us additional liveness information: The target (super-)register
// must not be valid before this point. Replace the COPY with a KILL
// instruction to maintain this information.
@@ -488,7 +488,7 @@ void VirtRegRewriter::rewrite() {
if (SubReg != 0) {
if (NoSubRegLiveness) {
// A virtual register kill refers to the whole register, so we may
- // have to add <imp-use,kill> operands for the super-register. A
+ // have to add implicit killed operands for the super-register. A
// partial redef always kills and redefines the super-register.
if ((MO.readsReg() && (MO.isDef() || MO.isKill())) ||
(MO.isDef() && subRegLiveThrough(*MI, PhysReg)))
@@ -513,9 +513,9 @@ void VirtRegRewriter::rewrite() {
}
}
- // The <def,undef> and <def,internal> flags only make sense for
+ // The def undef and def internal flags only make sense for
// sub-register defs, and we are substituting a full physreg. An
- // <imp-use,kill> operand from the SuperKills list will represent the
+ // implicit killed operand from the SuperKills list will represent the
// partial read of the super-register.
if (MO.isDef()) {
MO.setIsUndef(false);
diff --git a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
index 2d510a48d1c..1135f0f1262 100644
--- a/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -161,9 +161,9 @@ namespace {
/// A Chain is a sequence of instructions that are linked together by
/// an accumulation operand. For example:
///
-/// fmul d0<def>, ?
-/// fmla d1<def>, ?, ?, d0<kill>
-/// fmla d2<def>, ?, ?, d1<kill>
+/// fmul def d0, ?
+/// fmla def d1, ?, ?, killed d0
+/// fmla def d2, ?, ?, killed d1
///
/// There may be other instructions interleaved in the sequence that
/// do not belong to the chain. These other instructions must not use
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index dacb19330c1..b88beda4d6b 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2801,7 +2801,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
LiveIntervals *LIS) const {
// This is a bit of a hack. Consider this instruction:
//
- // %0<def> = COPY %sp; GPR64all:%0
+ // %0 = COPY %sp; GPR64all:%0
//
// We explicitly chose GPR64all for the virtual register so such a copy might
// be eliminated by RegisterCoalescer. However, that may not be possible, and
@@ -2830,7 +2830,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// Handle the case where a copy is being spilled or filled but the source
// and destination register class don't match. For example:
//
- // %0<def> = COPY %xzr; GPR64common:%0
+ // %0 = COPY %xzr; GPR64common:%0
//
// In this case we can still safely fold away the COPY and generate the
// following spill code:
@@ -2840,7 +2840,7 @@ MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
// This also eliminates spilled cross register class COPYs (e.g. between x and
// d regs) of the same size. For example:
//
- // %0<def> = COPY %1; GPR64:%0, FPR64:%1
+ // %0 = COPY %1; GPR64:%0, FPR64:%1
//
// will be filled as
//
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index de912244eeb..c406228b7fe 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -830,8 +830,8 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
if (SExtIdx != -1) {
// Generate the sign extension for the proper result of the ldp.
// I.e., with X1, that would be:
- // %w1<def> = KILL %w1, %x1<imp-def>
- // %x1<def> = SBFMXri %x1<kill>, 0, 31
+ // %w1 = KILL %w1, implicit-def %x1
+ // %x1 = SBFMXri killed %x1, 0, 31
MachineOperand &DstMO = MIB->getOperand(SExtIdx);
// Right now, DstMO has the extended register, since it comes from an
// extended opcode.
diff --git a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
index 5ff82c5d1e0..20918233e44 100644
--- a/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -1450,8 +1450,7 @@ bool AMDGPUMachineCFGStructurizer::shrinkPHI(MachineInstr &PHI,
unsigned *ReplaceReg) {
DEBUG(dbgs() << "Shrink PHI: ");
DEBUG(PHI.dump());
- DEBUG(dbgs() << " to " << printReg(getPHIDestReg(PHI), TRI)
- << "<def> = PHI(");
+ DEBUG(dbgs() << " to " << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
bool Replaced = false;
unsigned NumInputs = getPHINumInputs(PHI);
@@ -1507,8 +1506,7 @@ void AMDGPUMachineCFGStructurizer::replacePHI(
SmallVector<unsigned, 2> &PHIRegionIndices) {
DEBUG(dbgs() << "Replace PHI: ");
DEBUG(PHI.dump());
- DEBUG(dbgs() << " with " << printReg(getPHIDestReg(PHI), TRI)
- << "<def> = PHI(");
+ DEBUG(dbgs() << " with " << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
bool HasExternalEdge = false;
unsigned NumInputs = getPHINumInputs(PHI);
@@ -1566,7 +1564,7 @@ void AMDGPUMachineCFGStructurizer::replaceEntryPHI(
DEBUG(dbgs() << " register " << printReg(CombinedSourceReg, TRI) << "\n");
PHI.eraseFromParent();
} else {
- DEBUG(dbgs() << printReg(getPHIDestReg(PHI), TRI) << "<def> = PHI(");
+ DEBUG(dbgs() << printReg(getPHIDestReg(PHI), TRI) << " = PHI(");
MachineBasicBlock *MBB = PHI.getParent();
MachineInstrBuilder MIB =
BuildMI(*MBB, PHI, PHI.getDebugLoc(), TII->get(TargetOpcode::PHI),
@@ -1751,7 +1749,7 @@ void AMDGPUMachineCFGStructurizer::insertMergePHI(MachineBasicBlock *IfBB,
return;
}
DEBUG(dbgs() << "Merge PHI (" << printMBBReference(*MergeBB)
- << "): " << printReg(DestRegister, TRI) << "<def> = PHI("
+ << "): " << printReg(DestRegister, TRI) << " = PHI("
<< printReg(IfSourceRegister, TRI) << ", "
<< printMBBReference(*IfBB) << printReg(CodeSourceRegister, TRI)
<< ", " << printMBBReference(*CodeBB) << ")\n");
@@ -2147,7 +2145,7 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
const DebugLoc &DL = Entry->findDebugLoc(Entry->begin());
MachineInstrBuilder MIB = BuildMI(*Entry, Entry->instr_begin(), DL,
TII->get(TargetOpcode::PHI), DestReg);
- DEBUG(dbgs() << "Entry PHI " << printReg(DestReg, TRI) << "<def> = PHI(");
+ DEBUG(dbgs() << "Entry PHI " << printReg(DestReg, TRI) << " = PHI(");
unsigned CurrentBackedgeReg = 0;
@@ -2172,7 +2170,7 @@ void AMDGPUMachineCFGStructurizer::createEntryPHI(LinearizedRegion *CurrentRegio
BackedgePHI.addMBB((*SRI).second);
CurrentBackedgeReg = NewBackedgeReg;
DEBUG(dbgs() << "Inserting backedge PHI: "
- << printReg(NewBackedgeReg, TRI) << "<def> = PHI("
+ << printReg(NewBackedgeReg, TRI) << " = PHI("
<< printReg(CurrentBackedgeReg, TRI) << ", "
<< printMBBReference(*getPHIPred(*PHIDefInstr, 0))
<< ", "
@@ -2441,8 +2439,7 @@ void AMDGPUMachineCFGStructurizer::splitLoopPHI(MachineInstr &PHI,
MachineInstrBuilder MIB =
BuildMI(*EntrySucc, EntrySucc->instr_begin(), PHI.getDebugLoc(),
TII->get(TargetOpcode::PHI), NewDestReg);
- DEBUG(dbgs() << "Split Entry PHI " << printReg(NewDestReg, TRI)
- << "<def> = PHI(");
+ DEBUG(dbgs() << "Split Entry PHI " << printReg(NewDestReg, TRI) << " = PHI(");
MIB.addReg(PHISource);
MIB.addMBB(Entry);
DEBUG(dbgs() << printReg(PHISource, TRI) << ", "
diff --git a/lib/Target/AMDGPU/CaymanInstructions.td b/lib/Target/AMDGPU/CaymanInstructions.td
index 429d28e753c..ae40c638798 100644
--- a/lib/Target/AMDGPU/CaymanInstructions.td
+++ b/lib/Target/AMDGPU/CaymanInstructions.td
@@ -144,8 +144,8 @@ def VTX_READ_32_cm
// to be caused by ALU instructions in the next instruction group that wrote
// to the $src_gpr registers of the VTX_READ.
// e.g.
- // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
- // %t2_x<def> = MOV %zero
+ // %t3_x = VTX_READ_PARAM_32_eg killed %t2_x, 24
+ // %t2_x = MOV %zero
//Adding this constraint prevents this from happening.
let Constraints = "$src_gpr.ptr = $dst_gpr";
}
diff --git a/lib/Target/AMDGPU/EvergreenInstructions.td b/lib/Target/AMDGPU/EvergreenInstructions.td
index c25980eef85..5e26f97b0c8 100644
--- a/lib/Target/AMDGPU/EvergreenInstructions.td
+++ b/lib/Target/AMDGPU/EvergreenInstructions.td
@@ -212,8 +212,8 @@ def VTX_READ_32_eg
// to be caused by ALU instructions in the next instruction group that wrote
// to the $src_gpr registers of the VTX_READ.
// e.g.
- // %t3_x<def> = VTX_READ_PARAM_32_eg %t2_x<kill>, 24
- // %t2_x<def> = MOV %zero
+ // %t3_x = VTX_READ_PARAM_32_eg killed %t2_x, 24
+ // %t2_x = MOV %zero
//Adding this constraint prevents this from happening.
let Constraints = "$src_gpr.ptr = $dst_gpr";
}
diff --git a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
index 1bfa837bfb2..95bc7ca564c 100644
--- a/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
+++ b/lib/Target/AMDGPU/R600OptimizeVectorRegisters.cpp
@@ -12,15 +12,15 @@
/// common data and/or have enough undef subreg using swizzle abilities.
///
/// For instance let's consider the following pseudo code :
-/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
+/// %5 = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
/// ...
-/// %7<def> = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
+/// %7 = REG_SEQ %1, sub0, %3, sub1, undef, sub2, %4, sub3
/// (swizzable Inst) %7, SwizzleMask : sub0, sub1, sub2, sub3
///
/// is turned into :
-/// %5<def> = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
+/// %5 = REG_SEQ %1, sub0, %2, sub1, %3, sub2, undef, sub3
/// ...
-/// %7<def> = INSERT_SUBREG %4, sub3
+/// %7 = INSERT_SUBREG %4, sub3
/// (swizzable Inst) %7, SwizzleMask : sub0, sub2, sub1, sub3
///
/// This allow regalloc to reduce register pressure for vector registers and
diff --git a/lib/Target/AMDGPU/SIFixWWMLiveness.cpp b/lib/Target/AMDGPU/SIFixWWMLiveness.cpp
index 47db8982537..3f657b9fb81 100644
--- a/lib/Target/AMDGPU/SIFixWWMLiveness.cpp
+++ b/lib/Target/AMDGPU/SIFixWWMLiveness.cpp
@@ -17,8 +17,8 @@
/// %vgpr0 = V_MOV_B32_e32 0.0
/// if (...) {
/// %vgpr1 = ...
-/// %vgpr2 = WWM %vgpr1<kill>
-/// ... = %vgpr2<kill>
+/// %vgpr2 = WWM killed %vgpr1
+/// ... = killed %vgpr2
/// %vgpr0 = V_MOV_B32_e32 1.0
/// }
/// ... = %vgpr0
diff --git a/lib/Target/AMDGPU/SIFoldOperands.cpp b/lib/Target/AMDGPU/SIFoldOperands.cpp
index 52157408b36..e806ebb3d82 100644
--- a/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -971,9 +971,9 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
// Prevent folding operands backwards in the function. For example,
// the COPY opcode must not be replaced by 1 in this example:
//
- // %3<def> = COPY %vgpr0; VGPR_32:%3
+ // %3 = COPY %vgpr0; VGPR_32:%3
// ...
- // %vgpr0<def> = V_MOV_B32_e32 1, %exec<imp-use>
+ // %vgpr0 = V_MOV_B32_e32 1, implicit %exec
MachineOperand &Dst = MI.getOperand(0);
if (Dst.isReg() &&
!TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
diff --git a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
index 5fda45f6a7f..7b4652e8aa6 100644
--- a/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -480,7 +480,7 @@ Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
}
// If this is not immediate then it can be copy of immediate value, e.g.:
- // %1<def> = S_MOV_B32 255;
+ // %1 = S_MOV_B32 255;
if (Op.isReg()) {
for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
if (!isSameReg(Op, Def))
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index f9505beea20..b2f4a529efa 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1447,7 +1447,7 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
DEBUG(dbgs() << "widening: " << MI);
MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
- // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
+ // Get rid of the old implicit-def of DstRegD. Leave it if it defines a Q-reg
// or some other super-register.
int ImpDefIdx = MI.findRegisterDefOperandIdx(DstRegD);
if (ImpDefIdx != -1)
@@ -1650,7 +1650,7 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
}
for (unsigned i = 3, e = MI0.getNumOperands(); i != e; ++i) {
- // %12<def> = PICLDR %11, 0, pred:14, pred:%noreg
+ // %12 = PICLDR %11, 0, pred:14, pred:%noreg
const MachineOperand &MO0 = MI0.getOperand(i);
const MachineOperand &MO1 = MI1.getOperand(i);
if (!MO0.isIdenticalTo(MO1))
@@ -4668,7 +4668,7 @@ void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
NewMIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(ARM::VEXTd32),
DDst);
- // On the first instruction, both DSrc and DDst may be <undef> if present.
+ // On the first instruction, both DSrc and DDst may be undef if present.
// Specifically when the original instruction didn't have them as an
// <imp-use>.
unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
@@ -4688,7 +4688,7 @@ void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
MIB.addReg(DDst, RegState::Define);
// On the second instruction, DDst has definitely been defined above, so
- // it is not <undef>. DSrc, if present, can be <undef> as above.
+ // it is not undef. DSrc, if present, can be undef as above.
CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
CurUndef = CurReg == DSrc && !MI.readsRegister(CurReg, TRI);
MIB.addReg(CurReg, getUndefRegState(CurUndef));
@@ -4771,7 +4771,7 @@ unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
// We must be able to clobber the whole D-reg.
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
- // Virtual register must be a foo:ssub_0<def,undef> operand.
+ // Virtual register must be a def undef foo:ssub_0 operand.
if (!MO.getSubReg() || MI.readsVirtualRegister(Reg))
return 0;
} else if (ARM::SPRRegClass.contains(Reg)) {
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index bf67bbdc379..eab84ae59e2 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -922,7 +922,7 @@ bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
// .Lloadcmp:
// ldrexd rDestLo, rDestHi, [rAddr]
// cmp rDestLo, rDesiredLo
- // sbcs rTempReg<dead>, rDestHi, rDesiredHi
+ // sbcs dead rTempReg, rDestHi, rDesiredHi
// bne .Ldone
unsigned LDREXD = IsThumb ? ARM::t2LDREXD : ARM::LDREXD;
MachineInstrBuilder MIB;
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 1726d6bcb30..fe9562af152 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9168,7 +9168,7 @@ void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
// operand is still set to noreg. If needed, set the optional operand's
// register to CPSR, and remove the redundant implicit def.
//
- // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
+ // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).
// Rename pseudo opcodes.
unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 2b63e0c842f..c61e72ebb21 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -1697,7 +1697,7 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
if (OddReg == EvenReg && EvenDeadKill) {
// If the two source operands are the same, the kill marker is
// probably on the first one. e.g.
- // t2STRDi8 %r5<kill>, %r5, %r9<kill>, 0, 14, %reg0
+ // t2STRDi8 killed %r5, %r5, killed %r9, 0, 14, %reg0
EvenDeadKill = false;
OddDeadKill = true;
}
diff --git a/lib/Target/BPF/BPFISelDAGToDAG.cpp b/lib/Target/BPF/BPFISelDAGToDAG.cpp
index 283359c8b23..61b04d1f2a1 100644
--- a/lib/Target/BPF/BPFISelDAGToDAG.cpp
+++ b/lib/Target/BPF/BPFISelDAGToDAG.cpp
@@ -573,7 +573,7 @@ void BPFDAGToDAGISel::PreprocessTrunc(SDNode *Node,
return;
} else {
// The PHI node looks like:
- // %2<def> = PHI %0, <%bb.1>, %1, <%bb.3>
+ // %2 = PHI %0, <%bb.1>, %1, <%bb.3>
// Trace each incoming definition, e.g., (%0, %bb.1) and (%1, %bb.3)
// The AND operation can be removed if both %0 in %bb.1 and %1 in
// %bb.3 are defined with with a load matching the MaskN.
diff --git a/lib/Target/Hexagon/HexagonBlockRanges.cpp b/lib/Target/Hexagon/HexagonBlockRanges.cpp
index 1953439fc3e..ff915ca59da 100644
--- a/lib/Target/Hexagon/HexagonBlockRanges.cpp
+++ b/lib/Target/Hexagon/HexagonBlockRanges.cpp
@@ -368,7 +368,7 @@ void HexagonBlockRanges::computeInitialLiveRanges(InstrIndexMap &IndexMap,
}
}
// Defs and clobbers can overlap, e.g.
- // %d0<def,dead> = COPY %5, %r0<imp-def>, %r1<imp-def>
+ // dead %d0 = COPY %5, implicit-def %r0, implicit-def %r1
for (RegisterRef R : Defs)
Clobbers.erase(R);
diff --git a/lib/Target/Hexagon/HexagonConstPropagation.cpp b/lib/Target/Hexagon/HexagonConstPropagation.cpp
index 80db36071db..c59cc50c037 100644
--- a/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -187,7 +187,7 @@ namespace {
// Mapping: vreg -> cell
// The keys are registers _without_ subregisters. This won't allow
- // definitions in the form of "vreg:subreg<def> = ...". Such definitions
+ // definitions in the form of "vreg:subreg = ...". Such definitions
// would be questionable from the point of view of SSA, since the "vreg"
// could not be initialized in its entirety (specifically, an instruction
// defining the "other part" of "vreg" would also count as a definition
@@ -1977,7 +1977,7 @@ bool HexagonConstEvaluator::evaluate(const MachineInstr &MI,
{
const MachineOperand &VO = MI.getOperand(1);
// The operand of CONST32 can be a blockaddress, e.g.
- // %0<def> = CONST32 <blockaddress(@eat, %l)>
+ // %0 = CONST32 <blockaddress(@eat, %l)>
// Do this check for all instructions for safety.
if (!VO.isImm())
return false;
@@ -3147,7 +3147,7 @@ bool HexagonConstEvaluator::rewriteHexBranch(MachineInstr &BrI,
BrI.setDesc(JD);
while (BrI.getNumOperands() > 0)
BrI.RemoveOperand(0);
- // This ensures that all implicit operands (e.g. %r31<imp-def>, etc)
+ // This ensures that all implicit operands (e.g. implicit-def %r31, etc)
// are present in the rewritten branch.
for (auto &Op : NI->operands())
BrI.addOperand(Op);
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 2dfd7b7f9c8..d8135e95fba 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -351,11 +351,11 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr &I1,
// kill flag for a register (a removeRegisterKilled() analogous to
// addRegisterKilled) that handles aliased register correctly.
// * or has a killed aliased register use of I1's use reg
- // %d4<def> = A2_tfrpi 16
- // %r6<def> = A2_tfr %r9
- // %r8<def> = KILL %r8, %d4<imp-use,kill>
+ // %d4 = A2_tfrpi 16
+ // %r6 = A2_tfr %r9
+ // %r8 = KILL %r8, implicit killed %d4
// If we want to move R6 = across the KILL instruction we would have
- // to remove the %d4<imp-use,kill> operand. For now, we are
+ // to remove the implicit killed %d4 operand. For now, we are
// conservative and disallow the move.
// we can't move I1 across it.
if (MI.isDebugValue()) {
diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
index 652ea13c414..93ad2e7b5eb 100644
--- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
+++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp
@@ -25,38 +25,38 @@
//
// Example:
//
-// %40<def> = L2_loadrub_io %39<kill>, 1
-// %41<def> = S2_tstbit_i %40<kill>, 0
-// J2_jumpt %41<kill>, <%bb.5>, %pc<imp-def,dead>
-// J2_jump <%bb.4>, %pc<imp-def,dead>
+// %40 = L2_loadrub_io killed %39, 1
+// %41 = S2_tstbit_i killed %40, 0
+// J2_jumpt killed %41, <%bb.5>, implicit dead %pc
+// J2_jump <%bb.4>, implicit dead %pc
// Successors according to CFG: %bb.4(62) %bb.5(62)
//
// %bb.4: derived from LLVM BB %if.then
// Predecessors according to CFG: %bb.3
-// %11<def> = A2_addp %6, %10
+// %11 = A2_addp %6, %10
// S2_storerd_io %32, 16, %11
// Successors according to CFG: %bb.5
//
// %bb.5: derived from LLVM BB %if.end
// Predecessors according to CFG: %bb.3 %bb.4
-// %12<def> = PHI %6, <%bb.3>, %11, <%bb.4>
-// %13<def> = A2_addp %7, %12
-// %42<def> = C2_cmpeqi %9, 10
-// J2_jumpf %42<kill>, <%bb.3>, %pc<imp-def,dead>
-// J2_jump <%bb.6>, %pc<imp-def,dead>
+// %12 = PHI %6, <%bb.3>, %11, <%bb.4>
+// %13 = A2_addp %7, %12
+// %42 = C2_cmpeqi %9, 10
+// J2_jumpf killed %42, <%bb.3>, implicit dead %pc
+// J2_jump <%bb.6>, implicit dead %pc
// Successors according to CFG: %bb.6(4) %bb.3(124)
//
// would become:
//
-// %40<def> = L2_loadrub_io %39<kill>, 1
-// %41<def> = S2_tstbit_i %40<kill>, 0
-// spec-> %11<def> = A2_addp %6, %10
+// %40 = L2_loadrub_io killed %39, 1
+// %41 = S2_tstbit_i killed %40, 0
+// spec-> %11 = A2_addp %6, %10
// pred-> S2_pstorerdf_io %41, %32, 16, %11
-// %46<def> = PS_pselect %41, %6, %11
-// %13<def> = A2_addp %7, %46
-// %42<def> = C2_cmpeqi %9, 10
-// J2_jumpf %42<kill>, <%bb.3>, %pc<imp-def,dead>
-// J2_jump <%bb.6>, %pc<imp-def,dead>
+// %46 = PS_pselect %41, %6, %11
+// %13 = A2_addp %7, %46
+// %42 = C2_cmpeqi %9, 10
+// J2_jumpf killed %42, <%bb.3>, implicit dead %pc
+// J2_jump <%bb.6>, implicit dead %pc
// Successors according to CFG: %bb.6 %bb.3
#include "Hexagon.h"
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index 78c7c102e7d..d9f43225ea0 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -28,14 +28,14 @@
// definitions are predicable, then in the second step, the conditional
// transfers will then be rewritten as predicated instructions. E.g.
// %0 = A2_or %1, %2
-// %3 = A2_tfrt %99, %0<kill>
+// %3 = A2_tfrt %99, killed %0
// will be rewritten as
// %3 = A2_port %99, %1, %2
//
// This replacement has two variants: "up" and "down". Consider this case:
// %0 = A2_or %1, %2
// ... [intervening instructions] ...
-// %3 = A2_tfrt %99, %0<kill>
+// %3 = A2_tfrt %99, killed %0
// variant "up":
// %3 = A2_port %99, %1, %2
// ... [intervening instructions, %0->vreg3] ...
@@ -65,15 +65,15 @@
// will see both instructions as actual definitions, and will mark the
// first one as dead. The definition is not actually dead, and this
// situation will need to be fixed. For example:
-// %1<def,dead> = A2_tfrt ... ; marked as dead
-// %1<def> = A2_tfrf ...
+// dead %1 = A2_tfrt ... ; marked as dead
+// %1 = A2_tfrf ...
//
// Since any of the individual predicated transfers may end up getting
// removed (in case it is an identity copy), some pre-existing def may
// be marked as dead after live interval recomputation:
-// %1<def,dead> = ... ; marked as dead
+// dead %1 = ... ; marked as dead
// ...
-// %1<def> = A2_tfrf ... ; if A2_tfrt is removed
+// %1 = A2_tfrf ... ; if A2_tfrt is removed
// This case happens if %1 was used as a source in A2_tfrt, which means
// that is it actually live at the A2_tfrf, and so the now dead definition
// of %1 will need to be updated to non-dead at some point.
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index b5fa0689d04..d814fa79ea2 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1720,7 +1720,7 @@ bool HexagonHardwareLoops::fixupInductionVariable(MachineLoop *L) {
MachineOperand &MO = PredDef->getOperand(i);
if (MO.isReg()) {
// Skip all implicit references. In one case there was:
- // %140<def> = FCMPUGT32_rr %138, %139, %usr<imp-use>
+ // %140 = FCMPUGT32_rr %138, %139, implicit %usr
if (MO.isImplicit())
continue;
if (MO.isUse()) {
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index cb00bc770c0..eb643d0aeb2 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1615,8 +1615,8 @@ DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
}
// Inspired by this pair:
-// %r13<def> = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
-// S2_storeri_io %r29, 132, %r1<kill>; flags: mem:ST4[FixedStack1]
+// %r13 = L2_loadri_io %r29, 136; mem:LD4[FixedStack0]
+// S2_storeri_io %r29, 132, killed %r1; flags: mem:ST4[FixedStack1]
// Currently AA considers the addresses in these instructions to be aliasing.
bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
@@ -3515,7 +3515,7 @@ HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
case Hexagon::EH_RETURN_JMPR:
case Hexagon::PS_jmpret:
// jumpr r31
- // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
+ // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0.
DstReg = MI.getOperand(0).getReg();
if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
return HexagonII::HSIG_L2;
@@ -3705,7 +3705,7 @@ HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
case Hexagon::C2_cmovenewif:
// if ([!]P0[.new]) Rd = #0
// Actual form:
- // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
+ // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
DstReg = MI.getOperand(0).getReg();
SrcReg = MI.getOperand(1).getReg();
if (isIntRegForSubInst(DstReg) &&
diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp
index 8f177d9a4ee..99c16f14919 100644
--- a/lib/Target/Hexagon/HexagonNewValueJump.cpp
+++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp
@@ -129,9 +129,9 @@ static bool canBeFeederToNewValueJump(const HexagonInstrInfo *QII,
// using -- if (QRI->isSubRegister(feederReg, cmpReg1) logic
// before the callsite of this function
// But we can not as it comes in the following fashion.
- // %d0<def> = Hexagon_S2_lsr_r_p %d0<kill>, %r2<kill>
- // %r0<def> = KILL %r0, %d0<imp-use,kill>
- // %p0<def> = CMPEQri %r0<kill>, 0
+ // %d0 = Hexagon_S2_lsr_r_p killed %d0, killed %r2
+ // %r0 = KILL %r0, implicit killed %d0
+ // %p0 = CMPEQri killed %r0, 0
// Hence, we need to check if it's a KILL instruction.
if (II->getOpcode() == TargetOpcode::KILL)
return false;
@@ -196,9 +196,9 @@ static bool commonChecksToProhibitNewValueJump(bool afterRA,
// to new value jump. If they are in the path, bail out.
// KILL sets kill flag on the opcode. It also sets up a
// single register, out of pair.
- // %d0<def> = S2_lsr_r_p %d0<kill>, %r2<kill>
- // %r0<def> = KILL %r0, %d0<imp-use,kill>
- // %p0<def> = C2_cmpeqi %r0<kill>, 0
+ // %d0 = S2_lsr_r_p killed %d0, killed %r2
+ // %r0 = KILL %r0, implicit killed %d0
+ // %p0 = C2_cmpeqi killed %r0, 0
// PHI can be anything after RA.
// COPY can remateriaze things in between feeder, compare and nvj.
if (MII->getOpcode() == TargetOpcode::KILL ||
diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp
index 7f82a5c4c4d..581761c904a 100644
--- a/lib/Target/Hexagon/HexagonPeephole.cpp
+++ b/lib/Target/Hexagon/HexagonPeephole.cpp
@@ -8,27 +8,27 @@
// This peephole pass optimizes in the following cases.
// 1. Optimizes redundant sign extends for the following case
// Transform the following pattern
-// %170<def> = SXTW %166
+// %170 = SXTW %166
// ...
-// %176<def> = COPY %170:isub_lo
+// %176 = COPY %170:isub_lo
//
// Into
-// %176<def> = COPY %166
+// %176 = COPY %166
//
// 2. Optimizes redundant negation of predicates.
-// %15<def> = CMPGTrr %6, %2
+// %15 = CMPGTrr %6, %2
// ...
-// %16<def> = NOT_p %15<kill>
+// %16 = NOT_p killed %15
// ...
-// JMP_c %16<kill>, <%bb.1>, %pc<imp-def,dead>
+// JMP_c killed %16, <%bb.1>, implicit dead %pc
//
// Into
-// %15<def> = CMPGTrr %6, %2;
+// %15 = CMPGTrr %6, %2;
// ...
-// JMP_cNot %15<kill>, <%bb.1>, %pc<imp-def,dead>;
+// JMP_cNot killed %15, <%bb.1>, implicit dead %pc;
//
// Note: The peephole pass makes the instrucstions like
-// %170<def> = SXTW %166 or %16<def> = NOT_p %15<kill>
+// %170 = SXTW %166 or %16 = NOT_p killed %15
// redundant and relies on some form of dead removal instructions, like
// DCE or DIE to actually eliminate them.
@@ -132,7 +132,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
NextI = std::next(I);
MachineInstr &MI = *I;
// Look for sign extends:
- // %170<def> = SXTW %166
+ // %170 = SXTW %166
if (!DisableOptSZExt && MI.getOpcode() == Hexagon::A2_sxtw) {
assert(MI.getNumOperands() == 2);
MachineOperand &Dst = MI.getOperand(0);
@@ -143,13 +143,13 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Map the following:
- // %170<def> = SXTW %166
+ // %170 = SXTW %166
// PeepholeMap[170] = %166
PeepholeMap[DstReg] = SrcReg;
}
}
- // Look for %170<def> = COMBINE_ir_V4 (0, %169)
+ // Look for %170 = COMBINE_ir_V4 (0, %169)
// %170:DoublRegs, %169:IntRegs
if (!DisableOptExtTo64 && MI.getOpcode() == Hexagon::A4_combineir) {
assert(MI.getNumOperands() == 3);
@@ -192,14 +192,14 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) {
if (TargetRegisterInfo::isVirtualRegister(DstReg) &&
TargetRegisterInfo::isVirtualRegister(SrcReg)) {
// Map the following:
- // %170<def> = NOT_xx %166
+ // %170 = NOT_xx %166
// PeepholeMap[170] = %166
PeepholeMap[DstReg] = SrcReg;
}
}
// Look for copy:
- // %176<def> = COPY %170:isub_lo
+ // %176 = COPY %170:isub_lo
if (!DisableOptSZExt && MI.isCopy()) {
assert(MI.getNumOperands() == 2);
MachineOperand &Dst = MI.getOperand(0);
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index a8c5dea0d9e..0f5e297af4d 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -772,8 +772,8 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI,
// If data definition is because of implicit definition of the register,
// do not newify the store. Eg.
- // %r9<def> = ZXTH %r12, %d6<imp-use>, %r12<imp-def>
- // S2_storerh_io %r8, 2, %r12<kill>; mem:ST2[%scevgep343]
+ // %r9 = ZXTH %r12, implicit %d6, implicit-def %r12
+ // S2_storerh_io %r8, 2, killed %r12; mem:ST2[%scevgep343]
for (auto &MO : PacketMI.operands()) {
if (MO.isRegMask() && MO.clobbersPhysReg(DepReg))
return false;
@@ -787,8 +787,8 @@ bool HexagonPacketizerList::canPromoteToNewValueStore(const MachineInstr &MI,
// Handle imp-use of super reg case. There is a target independent side
// change that should prevent this situation but I am handling it for
// just-in-case. For example, we cannot newify R2 in the following case:
- // %r3<def> = A2_tfrsi 0;
- // S2_storeri_io %r0<kill>, 0, %r2<kill>, %d1<imp-use,kill>;
+ // %r3 = A2_tfrsi 0;
+ // S2_storeri_io killed %r0, 0, killed %r2, implicit killed %d1;
for (auto &MO : MI.operands()) {
if (MO.isReg() && MO.isUse() && MO.isImplicit() && MO.getReg() == DepReg)
return false;
@@ -892,12 +892,12 @@ bool HexagonPacketizerList::canPromoteToDotNew(const MachineInstr &MI,
// Go through the packet instructions and search for an anti dependency between
// them and DepReg from MI. Consider this case:
// Trying to add
-// a) %r1<def> = TFRI_cdNotPt %p3, 2
+// a) %r1 = TFRI_cdNotPt %p3, 2
// to this packet:
// {
-// b) %p0<def> = C2_or %p3<kill>, %p0<kill>
-// c) %p3<def> = C2_tfrrp %r23
-// d) %r1<def> = C2_cmovenewit %p3, 4
+// b) %p0 = C2_or killed %p3, killed %p0
+// c) %p3 = C2_tfrrp %r23
+// d) %r1 = C2_cmovenewit %p3, 4
// }
// The P3 from a) and d) will be complements after
// a)'s P3 is converted to .new form
@@ -962,11 +962,11 @@ bool HexagonPacketizerList::arePredicatesComplements(MachineInstr &MI1,
// One corner case deals with the following scenario:
// Trying to add
- // a) %r24<def> = A2_tfrt %p0, %r25
+ // a) %r24 = A2_tfrt %p0, %r25
// to this packet:
// {
- // b) %r25<def> = A2_tfrf %p0, %r24
- // c) %p0<def> = C2_cmpeqi %r26, 1
+ // b) %r25 = A2_tfrf %p0, %r24
+ // c) %p0 = C2_cmpeqi %r26, 1
// }
//
// On general check a) and b) are complements, but presence of c) will
@@ -1543,7 +1543,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
// There are certain anti-dependencies that cannot be ignored.
// Specifically:
- // J2_call ... %r0<imp-def> ; SUJ
+ // J2_call ... implicit-def %r0 ; SUJ
// R0 = ... ; SUI
// Those cannot be packetized together, since the call will observe
// the effect of the assignment to R0.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
index 7dd89c6eb8e..c5f3d434759 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCDuplexInfo.cpp
@@ -272,7 +272,7 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
case Hexagon::J2_jumpr:
case Hexagon::PS_jmpret:
// jumpr r31
- // Actual form JMPR %pc<imp-def>, %r31<imp-use>, %r0<imp-use,internal>.
+ // Actual form JMPR implicit-def %pc, implicit %r31, implicit internal %r0.
DstReg = MCI.getOperand(0).getReg();
if (Hexagon::R31 == DstReg)
return HexagonII::HSIG_L2;
@@ -471,7 +471,7 @@ unsigned HexagonMCInstrInfo::getDuplexCandidateGroup(MCInst const &MCI) {
case Hexagon::C2_cmovenewif:
// if ([!]P0[.new]) Rd = #0
// Actual form:
- // %r16<def> = C2_cmovenewit %p0<internal>, 0, %r16<imp-use,undef>;
+ // %r16 = C2_cmovenewit internal %p0, 0, implicit undef %r16;
DstReg = MCI.getOperand(0).getReg(); // Rd
PredReg = MCI.getOperand(1).getReg(); // P0
if (HexagonMCInstrInfo::isIntRegForSubInst(DstReg) &&
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
index 3a4a41ccb40..7bd54fdfa3d 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp
@@ -113,9 +113,10 @@ bool llvm::HexagonMCShuffle(MCContext &Context, bool Fatal,
if (!HexagonMCInstrInfo::bundleSize(MCB)) {
// There once was a bundle:
- // BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
- // * %d2<def> = IMPLICIT_DEF; flags:
- // * %d7<def> = IMPLICIT_DEF; flags:
+ // BUNDLE implicit-def %d2, implicit-def %r4, implicit-def %r5,
+ // implicit-def %d7, ...
+ // * %d2 = IMPLICIT_DEF; flags:
+ // * %d7 = IMPLICIT_DEF; flags:
// After the IMPLICIT_DEFs were removed by the asm printer, the bundle
// became empty.
DEBUG(dbgs() << "Skipping empty bundle");
@@ -137,9 +138,10 @@ llvm::HexagonMCShuffle(MCContext &Context, MCInstrInfo const &MCII,
if (!HexagonMCInstrInfo::bundleSize(MCB)) {
// There once was a bundle:
- // BUNDLE %d2<imp-def>, %r4<imp-def>, %r5<imp-def>, %d7<imp-def>, ...
- // * %d2<def> = IMPLICIT_DEF; flags:
- // * %d7<def> = IMPLICIT_DEF; flags:
+ // BUNDLE implicit-def %d2, implicit-def %r4, implicit-def %r5,
+ // implicit-def %d7, ...
+ // * %d2 = IMPLICIT_DEF; flags:
+ // * %d7 = IMPLICIT_DEF; flags:
// After the IMPLICIT_DEFs were removed by the asm printer, the bundle
// became empty.
DEBUG(dbgs() << "Skipping empty bundle");
diff --git a/lib/Target/Hexagon/RDFGraph.h b/lib/Target/Hexagon/RDFGraph.h
index 25c4b67230a..e3abb0e22f7 100644
--- a/lib/Target/Hexagon/RDFGraph.h
+++ b/lib/Target/Hexagon/RDFGraph.h
@@ -183,7 +183,7 @@
// This is typically used to prevent keeping registers artificially live
// in cases when they are defined via predicated instructions. For example:
// r0 = add-if-true cond, r10, r11 (1)
-// r0 = add-if-false cond, r12, r13, r0<imp-use> (2)
+// r0 = add-if-false cond, r12, r13, implicit r0 (2)
// ... = r0 (3)
// Before (1), r0 is not intended to be live, and the use of r0 in (3) is
// not meant to be reached by any def preceding (1). However, since the
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index 74394d0e84c..589c8f9884f 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -480,7 +480,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), get(NewOpc));
// For MIPSR6 JI*C requires an immediate 0 as an operand, JIALC(64) an
- // immediate 0 as an operand and requires the removal of it's %ra<imp-def>
+ // immediate 0 as an operand and requires the removal of its implicit-def %ra
// implicit operand as copying the implicit operations of the instructio we're
// looking at will give us the correct flags.
if (NewOpc == Mips::JIC || NewOpc == Mips::JIALC || NewOpc == Mips::JIC64 ||
diff --git a/lib/Target/NVPTX/NVPTXPeephole.cpp b/lib/Target/NVPTX/NVPTXPeephole.cpp
index f33655a16c2..415889dc70c 100644
--- a/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -22,11 +22,11 @@
// This peephole pass optimizes these cases, for example
//
// It will transform the following pattern
-// %0<def> = LEA_ADDRi64 %VRFrame, 4
-// %1<def> = cvta_to_local_yes_64 %0
+// %0 = LEA_ADDRi64 %VRFrame, 4
+// %1 = cvta_to_local_yes_64 %0
//
// into
-// %1<def> = LEA_ADDRi64 %VRFrameLocal, 4
+// %1 = LEA_ADDRi64 %VRFrameLocal, 4
//
// %VRFrameLocal is the virtual register name of %SPL
//
diff --git a/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
index cd078972307..48b94a53823 100644
--- a/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -62,9 +62,9 @@ namespace llvm {
/// %bb.0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %0<def> = COPY %f1; F8RC:%0
-/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
-/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// %0 = COPY %f1; F8RC:%0
+/// %5 = CMPLWI killed %4, 0; CRRC:%5 GPRC:%4
+/// %8 = LXSDX %zero8, killed %7, implicit %rm;
/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// BCC 76, %5, <%bb.2>; CRRC:%5
/// Successors according to CFG: %bb.1(?%) %bb.2(?%)
@@ -75,7 +75,7 @@ namespace llvm {
///
/// %bb.2: derived from LLVM BB %entry
/// Predecessors according to CFG: %bb.0 %bb.1
-/// %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
+/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>;
/// F8RC:%9,%8,%0
/// <SNIP2>
/// BCC 76, %5, <%bb.4>; CRRC:%5
@@ -87,10 +87,10 @@ namespace llvm {
///
/// %bb.4: derived from LLVM BB %entry
/// Predecessors according to CFG: %bb.2 %bb.3
-/// %13<def> = PHI %12, <%bb.3>, %2, <%bb.2>;
+/// %13 = PHI %12, <%bb.3>, %2, <%bb.2>;
/// F8RC:%13,%12,%2
/// <SNIP3>
-/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
+/// BLR8 implicit %lr8, implicit %rm, implicit %f1
///
/// When this pattern is detected, branch coalescing will try to collapse
/// it by moving code in %bb.2 to %bb.0 and/or %bb.4 and removing %bb.3.
@@ -100,9 +100,9 @@ namespace llvm {
/// %bb.0: derived from LLVM BB %entry
/// Live Ins: %f1 %f3 %x6
/// <SNIP1>
-/// %0<def> = COPY %f1; F8RC:%0
-/// %5<def> = CMPLWI %4<kill>, 0; CRRC:%5 GPRC:%4
-/// %8<def> = LXSDX %zero8, %7<kill>, %rm<imp-use>;
+/// %0 = COPY %f1; F8RC:%0
+/// %5 = CMPLWI killed %4, 0; CRRC:%5 GPRC:%4
+/// %8 = LXSDX %zero8, killed %7, implicit %rm;
/// mem:LD8[ConstantPool] F8RC:%8 G8RC:%7
/// <SNIP2>
/// BCC 76, %5, <%bb.4>; CRRC:%5
@@ -115,12 +115,12 @@ namespace llvm {
///
/// %bb.4: derived from LLVM BB %entry
/// Predecessors according to CFG: %bb.0 %bb.1
-/// %9<def> = PHI %8, <%bb.1>, %0, <%bb.0>;
+/// %9 = PHI %8, <%bb.1>, %0, <%bb.0>;
/// F8RC:%9,%8,%0
-/// %13<def> = PHI %12, <%bb.1>, %2, <%bb.0>;
+/// %13 = PHI %12, <%bb.1>, %2, <%bb.0>;
/// F8RC:%13,%12,%2
/// <SNIP3>
-/// BLR8 %lr8<imp-use>, %rm<imp-use>, %f1<imp-use>
+/// BLR8 implicit %lr8, implicit %rm, implicit %f1
///
/// Branch Coalescing does not split blocks, it moves everything in the same
/// direction ensuring it does not break use/definition semantics.
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 15cc1c76760..fcc38e233b2 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -2315,10 +2315,10 @@ PPCInstrInfo::isSignOrZeroExtended(const MachineInstr &MI, bool SignExt,
// For a method return value, we check the ZExt/SExt flags in attribute.
// We assume the following code sequence for method call.
- // ADJCALLSTACKDOWN 32, %r1<imp-def,dead>, %r1<imp-use>
+ // ADJCALLSTACKDOWN 32, implicit dead %r1, implicit %r1
// BL8_NOP <ga:@func>,...
- // ADJCALLSTACKUP 32, 0, %r1<imp-def,dead>, %r1<imp-use>
- // %5<def> = COPY %x3; G8RC:%5
+ // ADJCALLSTACKUP 32, 0, implicit dead %r1, implicit %r1
+ // %5 = COPY %x3; G8RC:%5
if (SrcReg == PPC::X3) {
const MachineBasicBlock *MBB = MI.getParent();
MachineBasicBlock::const_instr_iterator II =
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index c6fcea7c956..05eb7563893 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -585,8 +585,8 @@ bool PPCMIPeephole::simplifyCode(void) {
// We can eliminate RLDICL (e.g. for zero-extension)
// if all bits to clear are already zero in the input.
// This code assume following code sequence for zero-extension.
- // %6<def> = COPY %5:sub_32; (optional)
- // %8<def> = IMPLICIT_DEF;
+ // %6 = COPY %5:sub_32; (optional)
+ // %8 = IMPLICIT_DEF;
// %7<def,tied1> = INSERT_SUBREG %8<tied0>, %6, sub_32;
if (!EnableZExtElimination) break;
@@ -685,7 +685,7 @@ bool PPCMIPeephole::simplifyCode(void) {
DEBUG(dbgs() << "Optimizing LI to ADDI: ");
DEBUG(LiMI->dump());
- // There could be repeated registers in the PHI, e.g: %1<def> =
+ // There could be repeated registers in the PHI, e.g: %1 =
// PHI %6, <%bb.2>, %8, <%bb.3>, %8, <%bb.6>; So if we've
// already replaced the def instruction, skip.
if (LiMI->getOpcode() == PPC::ADDI || LiMI->getOpcode() == PPC::ADDI8)
diff --git a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
index 10394166ddf..544c7f2aeef 100644
--- a/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
+++ b/lib/Target/PowerPC/PPCQPXLoadSplat.cpp
@@ -79,8 +79,8 @@ bool PPCQPXLoadSplat::runOnMachineFunction(MachineFunction &MF) {
}
// We're looking for a sequence like this:
- // %f0<def> = LFD 0, %x3<kill>, %qf0<imp-def>; mem:LD8[%a](tbaa=!2)
- // %qf1<def> = QVESPLATI %qf0<kill>, 0, %rm<imp-use>
+ // %f0 = LFD 0, killed %x3, implicit-def %qf0; mem:LD8[%a](tbaa=!2)
+ // %qf1 = QVESPLATI killed %qf0, 0, implicit %rm
for (auto SI = Splats.begin(); SI != Splats.end();) {
MachineInstr *SMI = *SI;
diff --git a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
index 4d001c0210d..422bb7ba305 100644
--- a/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
+++ b/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
@@ -90,21 +90,21 @@ protected:
// This pass is run after register coalescing, and so we're looking for
// a situation like this:
// ...
- // %5<def> = COPY %9; VSLRC:%5,%9
+ // %5 = COPY %9; VSLRC:%5,%9
// %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
- // %rm<imp-use>; VSLRC:%5,%17,%16
+ // implicit %rm; VSLRC:%5,%17,%16
// ...
// %9<def,tied1> = XSMADDADP %9<tied0>, %17, %19,
- // %rm<imp-use>; VSLRC:%9,%17,%19
+ // implicit %rm; VSLRC:%9,%17,%19
// ...
// Where we can eliminate the copy by changing from the A-type to the
// M-type instruction. Specifically, for this example, this means:
// %5<def,tied1> = XSMADDADP %5<tied0>, %17, %16,
- // %rm<imp-use>; VSLRC:%5,%17,%16
+ // implicit %rm; VSLRC:%5,%17,%16
// is replaced by:
// %16<def,tied1> = XSMADDMDP %16<tied0>, %18, %9,
- // %rm<imp-use>; VSLRC:%16,%18,%9
- // and we remove: %5<def> = COPY %9; VSLRC:%5,%9
+ // implicit %rm; VSLRC:%16,%18,%9
+ // and we remove: %5 = COPY %9; VSLRC:%5,%9
SlotIndex FMAIdx = LIS->getInstructionIndex(MI);
@@ -150,10 +150,10 @@ protected:
// walking the MIs we may as well test liveness here.
//
// FIXME: There is a case that occurs in practice, like this:
- // %9<def> = COPY %f1; VSSRC:%9
+ // %9 = COPY %f1; VSSRC:%9
// ...
- // %6<def> = COPY %9; VSSRC:%6,%9
- // %7<def> = COPY %9; VSSRC:%7,%9
+ // %6 = COPY %9; VSSRC:%6,%9
+ // %7 = COPY %9; VSSRC:%7,%9
// %9<def,tied1> = XSMADDASP %9<tied0>, %1, %4; VSSRC:
// %6<def,tied1> = XSMADDASP %6<tied0>, %1, %2; VSSRC:
// %7<def,tied1> = XSMADDASP %7<tied0>, %1, %3; VSSRC:
diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp
index 8009341eab7..ca82740d3e8 100644
--- a/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -436,8 +436,8 @@ bool SystemZElimCompare::optimizeCompareZero(
// Also do a forward search to handle cases where an instruction after the
// compare can be converted like
//
- // LTEBRCompare %f0s, %f0s, %cc<imp-def> LTEBRCompare %f0s, %f0s, %cc<imp-def>
- // %f2s<def> = LER %f0s
+ // LTEBRCompare %f0s, %f0s, implicit-def %cc   LTEBRCompare %f0s, %f0s, implicit-def %cc
+ // %f2s = LER %f0s
//
MBBI = Compare, MBBE = MBB.end();
while (++MBBI != MBBE) {
diff --git a/lib/Target/X86/README-X86-64.txt b/lib/Target/X86/README-X86-64.txt
index 13856486b14..a3ea4595ac1 100644
--- a/lib/Target/X86/README-X86-64.txt
+++ b/lib/Target/X86/README-X86-64.txt
@@ -103,20 +103,20 @@ LBB1_3: ## bb
Before regalloc, we have:
- %reg1025<def> = IMUL32rri8 %reg1024, 45, %eflags<imp-def>
+ %reg1025 = IMUL32rri8 %reg1024, 45, implicit-def %eflags
JMP mbb<bb2,0x203afb0>
Successors according to CFG: 0x203afb0 (#3)
bb1: 0x203af60, LLVM BB @0x1e02310, ID#2:
Predecessors according to CFG: 0x203aec0 (#0)
- %reg1026<def> = IMUL32rri8 %reg1024, 78, %eflags<imp-def>
+ %reg1026 = IMUL32rri8 %reg1024, 78, implicit-def %eflags
Successors according to CFG: 0x203afb0 (#3)
bb2: 0x203afb0, LLVM BB @0x1e02340, ID#3:
Predecessors according to CFG: 0x203af10 (#1) 0x203af60 (#2)
- %reg1027<def> = PHI %reg1025, mbb<bb,0x203af10>,
+ %reg1027 = PHI %reg1025, mbb<bb,0x203af10>,
%reg1026, mbb<bb1,0x203af60>
- %reg1029<def> = MOVZX64rr32 %reg1027
+ %reg1029 = MOVZX64rr32 %reg1027
so we'd have to know that IMUL32rri8 leaves the high word zero extended and to
be able to recognize the zero extend. This could also presumably be implemented
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
index 2e39cb0d797..2f7dd5804fe 100644
--- a/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -191,15 +191,15 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) {
/// %bb.2: derived from LLVM BB %if.then
/// Live Ins: %rdi
/// Predecessors according to CFG: %bb.0
-/// %ax<def> = MOV16rm %rdi<kill>, 1, %noreg, 0, %noreg, %eax<imp-def>;
+/// %ax = MOV16rm killed %rdi, 1, %noreg, 0, %noreg, implicit-def %eax;
/// mem:LD2[%p]
-/// No %eax<imp-use>
+/// No implicit %eax
/// Successors according to CFG: %bb.3(?%)
///
/// %bb.3: derived from LLVM BB %if.end
/// Live Ins: %eax Only %ax is actually live
/// Predecessors according to CFG: %bb.2 %bb.1
-/// %ax<def> = KILL %ax, %eax<imp-use,kill>
+/// %ax = KILL %ax, implicit killed %eax
/// RET 0, %ax
static bool isLive(const MachineInstr &MI,
const LivePhysRegs &LiveRegs,
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 96f19d35815..8eeb571231e 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -4469,7 +4469,7 @@ MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
unsigned leaInReg2 = 0;
MachineInstr *InsMI2 = nullptr;
if (Src == Src2) {
- // ADD16rr %reg1028<kill>, %reg1028
+ // ADD16rr killed %reg1028, %reg1028
// just a single insert_subreg.
addRegReg(MIB, leaInReg, true, leaInReg, false);
} else {
@@ -7633,7 +7633,7 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
/// This is used for mapping:
/// %xmm4 = V_SET0
/// to:
-/// %xmm4 = PXORrr %xmm4<undef>, %xmm4<undef>
+/// %xmm4 = PXORrr undef %xmm4, undef %xmm4
///
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc) {
@@ -8197,12 +8197,12 @@ static bool hasUndefRegUpdate(unsigned Opcode) {
///
/// This catches the VCVTSI2SD family of instructions:
///
-/// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
+/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
///
/// We should to be careful *not* to catch VXOR idioms which are presumably
/// handled specially in the pipeline:
///
-/// vxorps %xmm1<undef>, %xmm1<undef>, %xmm1
+/// vxorps undef %xmm1, undef %xmm1, %xmm1
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
@@ -10895,7 +10895,7 @@ X86InstrInfo::getOutliningType(MachineInstr &MI) const {
// FIXME: There are instructions which are being manually built without
// explicit uses/defs so we also have to check the MCInstrDesc. We should be
// able to remove the extra checks once those are fixed up. For example,
- // sometimes we might get something like %rax<def> = POP64r 1. This won't be
+ // sometimes we might get something like %rax = POP64r 1. This won't be
// caught by modifiesRegister or readsRegister even though the instruction
// really ought to be formed so that modifiesRegister/readsRegister would
// catch it.
diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp
index 5999591d181..0b67e819a64 100644
--- a/lib/Target/X86/X86VZeroUpper.cpp
+++ b/lib/Target/X86/X86VZeroUpper.cpp
@@ -235,7 +235,7 @@ void VZeroUpperInserter::processBasicBlock(MachineBasicBlock &MBB) {
// If the call has no RegMask, skip it as well. It usually happens on
// helper function calls (such as '_chkstk', '_ftol2') where standard
// calling convention is not used (RegMask is not used to mark register
- // clobbered and register usage (def/imp-def/use) is well-defined and
+ // clobbered and register usage (def/implicit-def/use) is well-defined and
// explicitly specified.
if (IsCall && !callHasRegMask(MI))
continue;
diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index aa81c3aff89..72069f0e62e 100644
--- a/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -43,7 +43,7 @@ define [1 x double] @constant() {
; The key problem here is that we may fail to create an MBB referenced by a
; PHI. If so, we cannot complete the G_PHI and mustn't try or bad things
; happen.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6, %2; mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: G_STORE %6(s32), %2(p0); mem:ST4[%addr] GPR:%6,%2 (in function: pending_phis)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for pending_phis
; FALLBACK-WITH-REPORT-OUT-LABEL: pending_phis:
define i32 @pending_phis(i1 %tst, i32 %val, i32* %addr) {
@@ -63,7 +63,7 @@ false:
}
; General legalizer inability to handle types whose size wasn't a power of 2.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST6[%addr](align=8) (in function: odd_type)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(s42), %0(p0); mem:ST6[%addr](align=8) (in function: odd_type)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_type
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_type:
define void @odd_type(i42* %addr) {
@@ -72,7 +72,7 @@ define void @odd_type(i42* %addr) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST28[%addr](align=32) (in function: odd_vector)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(<7 x s32>), %0(p0); mem:ST28[%addr](align=32) (in function: odd_vector)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for odd_vector
; FALLBACK-WITH-REPORT-OUT-LABEL: odd_vector:
define void @odd_vector(<7 x i32>* %addr) {
@@ -91,7 +91,7 @@ define i128 @sequence_sizes([8 x i8] %in) {
}
; Just to make sure we don't accidentally emit a normal load/store.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2<def>(s64) = G_LOAD %0; mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: cannot select: %2:gpr(s64) = G_LOAD %0(p0); mem:LD8[%addr] GPR:%2,%0 (in function: atomic_ops)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for atomic_ops
; FALLBACK-WITH-REPORT-LABEL: atomic_ops:
define i64 @atomic_ops(i64* %addr) {
@@ -132,14 +132,14 @@ continue:
}
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(s128) = G_FCONSTANT quad 2
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s128) = G_FCONSTANT quad 2
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_quad_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_quad_dump:
define fp128 @test_quad_dump() {
ret fp128 0xL00000000000000004000000000000000
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0<def>(p0) = G_EXTRACT_VECTOR_ELT %1, %2; (in function: vector_of_pointers_extractelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(p0) = G_EXTRACT_VECTOR_ELT %1(<2 x p0>), %2(s32); (in function: vector_of_pointers_extractelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_extractelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_extractelement:
@var = global <2 x i16*> zeroinitializer
@@ -156,7 +156,7 @@ end:
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %4; mem:ST16[undef] (in function: vector_of_pointers_insertelement)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0(<2 x p0>), %4(p0); mem:ST16[undef] (in function: vector_of_pointers_insertelement)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for vector_of_pointers_insertelement
; FALLBACK-WITH-REPORT-OUT-LABEL: vector_of_pointers_insertelement:
define void @vector_of_pointers_insertelement() {
@@ -172,7 +172,7 @@ end:
br label %block
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %3; mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(s96), %3(p0); mem:ST12[undef](align=4) (in function: nonpow2_insertvalue_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
%struct96 = type { float, float, float }
@@ -213,7 +213,7 @@ define void @nonpow2_load_narrowing() {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3, %0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3(s96), %0(p0); mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
define void @nonpow2_store_narrowing(i96* %c) {
@@ -223,7 +223,7 @@ define void @nonpow2_store_narrowing(i96* %c) {
ret void
}
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0, %1; mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0(s96), %1(p0); mem:ST12[undef](align=16) (in function: nonpow2_constant_narrowing)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
define void @nonpow2_constant_narrowing() {
@@ -233,8 +233,8 @@ define void @nonpow2_constant_narrowing() {
; Currently can't handle vector lengths that aren't an exact multiple of
; natively supported vector lengths. Test that the fall-back works for those.
-; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1<def>(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2<def>(s64) = G_EXTRACT_VECTOR_ELT %1, %3; (in function: nonpow2_vector_add_fewerelements)
+; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %1(<7 x s64>) = G_ADD %0, %0; (in function: nonpow2_vector_add_fewerelements
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %2:_(s64) = G_EXTRACT_VECTOR_ELT %1(<7 x s64>), %3(s64); (in function: nonpow2_vector_add_fewerelements)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
define void @nonpow2_vector_add_fewerelements() {
diff --git a/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir b/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
index 7d0c9a37d17..94a9134072a 100644
--- a/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
+++ b/test/CodeGen/AArch64/GlobalISel/verify-regbankselected.mir
@@ -9,8 +9,8 @@
...
---
# CHECK: *** Bad machine code: Generic virtual register must have a bank in a RegBankSelected function ***
-# CHECK: instruction: %0<def>(s64) = COPY
-# CHECK: operand 0: %0<def>
+# CHECK: instruction: %0:_(s64) = COPY
+# CHECK: operand 0: %0
name: test
regBankSelected: true
registers:
diff --git a/test/CodeGen/AArch64/GlobalISel/verify-selected.mir b/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
index a182cf53173..772233ec103 100644
--- a/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
+++ b/test/CodeGen/AArch64/GlobalISel/verify-selected.mir
@@ -22,11 +22,11 @@ body: |
%0 = COPY %x0
; CHECK: *** Bad machine code: Unexpected generic instruction in a Selected function ***
- ; CHECK: instruction: %1<def> = G_ADD
+ ; CHECK: instruction: %1:gpr64 = G_ADD
%1 = G_ADD %0, %0
; CHECK: *** Bad machine code: Generic virtual register invalid in a Selected function ***
- ; CHECK: instruction: %2<def>(s64) = COPY
- ; CHECK: operand 0: %2<def>
+ ; CHECK: instruction: %2:gpr(s64) = COPY
+ ; CHECK: operand 0: %2
%2(s64) = COPY %x0
...
diff --git a/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
index 3ad9442b674..55f6c01cbd9 100644
--- a/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
+++ b/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
@@ -296,7 +296,7 @@ declare double @hh(double) #1
; Check that we correctly deal with repeated operands.
; The following testcase creates:
-; %d1<def> = FADDDrr %d0<kill>, %d0
+; %d1 = FADDDrr killed %d0, %d0
; We'll get a crash if we naively look at the first operand, remove it
; from the substitution list then look at the second operand.
diff --git a/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll b/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
index a21b6f2b0d9..bd0028c7452 100644
--- a/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
+++ b/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -mtriple=arm64-apple-ios -verify-machineinstrs | FileCheck %s
; LdStOpt bug created illegal instruction:
-; %d1<def>, %d2<def> = LDPSi %x0, 1
+; %d1, %d2 = LDPSi %x0, 1
; rdar://11512047
%0 = type opaque
diff --git a/test/CodeGen/AArch64/arm64-csldst-mmo.ll b/test/CodeGen/AArch64/arm64-csldst-mmo.ll
index b0059193d34..c69779add59 100644
--- a/test/CodeGen/AArch64/arm64-csldst-mmo.ll
+++ b/test/CodeGen/AArch64/arm64-csldst-mmo.ll
@@ -11,7 +11,7 @@
; CHECK: Before post-MI-sched:
; CHECK-LABEL: # Machine code for function test1:
; CHECK: SU(2): STRWui %wzr
-; CHECK: SU(3): %x21<def>, %x20<def> = LDPXi %sp
+; CHECK: SU(3): %x21, %x20 = LDPXi %sp
; CHECK: Predecessors:
; CHECK-NEXT: SU(0): Out
; CHECK-NEXT: SU(0): Out
diff --git a/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll b/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll
index 03d05429308..d43efa7ee79 100644
--- a/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll
+++ b/test/CodeGen/AArch64/arm64-dead-register-def-bug.ll
@@ -3,7 +3,7 @@
; Check that the dead register definition pass is considering implicit defs.
; When rematerializing through truncates, the coalescer may produce instructions
; with dead defs, but live implicit-defs of subregs:
-; E.g. %x1<def, dead> = MOVi64imm 2, %w1<imp-def>; %x1:GPR64, %w1:GPR32
+; E.g. dead %x1 = MOVi64imm 2, implicit-def %w1; %x1:GPR64, %w1:GPR32
; These instructions are live, and their definitions should not be rewritten.
;
; <rdar://problem/16492408>
diff --git a/test/CodeGen/AArch64/arm64-fast-isel-rem.ll b/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
index 58f414432ac..c26bfa8bcfe 100644
--- a/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
+++ b/test/CodeGen/AArch64/arm64-fast-isel-rem.ll
@@ -4,9 +4,9 @@
; CHECK-SSA-LABEL: Machine code for function t1
-; CHECK-SSA: [[QUOTREG:%[0-9]+]]<def> = SDIVWr
-; CHECK-SSA-NOT: [[QUOTREG]]<def> =
-; CHECK-SSA: {{%[0-9]+}}<def> = MSUBWrrr [[QUOTREG]]
+; CHECK-SSA: [[QUOTREG:%[0-9]+]]:gpr32 = SDIVWr
+; CHECK-SSA-NOT: [[QUOTREG]] =
+; CHECK-SSA: {{%[0-9]+}}:gpr32 = MSUBWrrr killed [[QUOTREG]]
; CHECK-SSA-LABEL: Machine code for function t2
diff --git a/test/CodeGen/AArch64/arm64-ldp-cluster.ll b/test/CodeGen/AArch64/arm64-ldp-cluster.ll
index 370db233fcb..75b02b9d913 100644
--- a/test/CodeGen/AArch64/arm64-ldp-cluster.ll
+++ b/test/CodeGen/AArch64/arm64-ldp-cluster.ll
@@ -6,13 +6,13 @@
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1): %{{[0-9]+}}:gpr32 = LDRWui
+; CHECK: SU(2): %{{[0-9]+}}:gpr32 = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int:%bb.0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1): %{{[0-9]+}}:gpr32 = LDRWui
+; EXYNOS: SU(2): %{{[0-9]+}}:gpr32 = LDRWui
define i32 @ldr_int(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 1
%tmp1 = load i32, i32* %p1, align 2
@@ -26,13 +26,13 @@ define i32 @ldr_int(i32* %a) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_sext_int:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(2)
-; CHECK: SU(1): %{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(2): %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(1): %{{[0-9]+}}:gpr64 = LDRSWui
+; CHECK: SU(2): %{{[0-9]+}}:gpr64 = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_sext_int:%bb.0
; EXYNOS: Cluster ld/st SU(1) - SU(2)
-; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(1): %{{[0-9]+}}:gpr64 = LDRSWui
+; EXYNOS: SU(2): %{{[0-9]+}}:gpr64 = LDRSWui
define i64 @ldp_sext_int(i32* %p) nounwind {
%tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
@@ -47,13 +47,13 @@ define i64 @ldp_sext_int(i32* %p) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldur_int:%bb.0
; CHECK: Cluster ld/st SU(2) - SU(1)
-; CHECK: SU(1): %{{[0-9]+}}<def> = LDURWi
-; CHECK: SU(2): %{{[0-9]+}}<def> = LDURWi
+; CHECK: SU(1): %{{[0-9]+}}:gpr32 = LDURWi
+; CHECK: SU(2): %{{[0-9]+}}:gpr32 = LDURWi
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldur_int:%bb.0
; EXYNOS: Cluster ld/st SU(2) - SU(1)
-; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDURWi
-; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDURWi
+; EXYNOS: SU(1): %{{[0-9]+}}:gpr32 = LDURWi
+; EXYNOS: SU(2): %{{[0-9]+}}:gpr32 = LDURWi
define i32 @ldur_int(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
%tmp1 = load i32, i32* %p1, align 2
@@ -67,13 +67,13 @@ define i32 @ldur_int(i32* %a) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_sext_zext_int:%bb.0
; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3): %{{[0-9]+}}<def> = LDRSWui
-; CHECK: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; CHECK: SU(3): %{{[0-9]+}}:gpr64 = LDRSWui
+; CHECK: SU(4): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_sext_zext_int:%bb.0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3): %{{[0-9]+}}<def> = LDRSWui
-; EXYNOS: SU(4): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
+; EXYNOS: SU(3): %{{[0-9]+}}:gpr64 = LDRSWui
+; EXYNOS: SU(4): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
%tmp0 = load i64, i64* %q, align 4
%tmp = load i32, i32* %p, align 4
@@ -90,13 +90,13 @@ define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldp_half_zext_sext_int:%bb.0
; CHECK: Cluster ld/st SU(3) - SU(4)
-; CHECK: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; CHECK: SU(4): %{{[0-9]+}}<def> = LDRSWui
+; CHECK: SU(3): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
+; CHECK: SU(4): %{{[0-9]+}}:gpr64 = LDRSWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldp_half_zext_sext_int:%bb.0
; EXYNOS: Cluster ld/st SU(3) - SU(4)
-; EXYNOS: SU(3): %{{[0-9]+}}:sub_32<def,read-undef> = LDRWui
-; EXYNOS: SU(4): %{{[0-9]+}}<def> = LDRSWui
+; EXYNOS: SU(3): undef %{{[0-9]+}}.sub_32:gpr64 = LDRWui
+; EXYNOS: SU(4): %{{[0-9]+}}:gpr64 = LDRSWui
define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
%tmp0 = load i64, i64* %q, align 4
%tmp = load i32, i32* %p, align 4
@@ -113,13 +113,13 @@ define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldr_int_volatile:%bb.0
; CHECK-NOT: Cluster ld/st
-; CHECK: SU(1): %{{[0-9]+}}<def> = LDRWui
-; CHECK: SU(2): %{{[0-9]+}}<def> = LDRWui
+; CHECK: SU(1): %{{[0-9]+}}:gpr32 = LDRWui
+; CHECK: SU(2): %{{[0-9]+}}:gpr32 = LDRWui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldr_int_volatile:%bb.0
; EXYNOS-NOT: Cluster ld/st
-; EXYNOS: SU(1): %{{[0-9]+}}<def> = LDRWui
-; EXYNOS: SU(2): %{{[0-9]+}}<def> = LDRWui
+; EXYNOS: SU(1): %{{[0-9]+}}:gpr32 = LDRWui
+; EXYNOS: SU(2): %{{[0-9]+}}:gpr32 = LDRWui
define i32 @ldr_int_volatile(i32* %a) nounwind {
%p1 = getelementptr inbounds i32, i32* %a, i32 1
%tmp1 = load volatile i32, i32* %p1, align 2
@@ -133,8 +133,8 @@ define i32 @ldr_int_volatile(i32* %a) nounwind {
; CHECK: ********** MI Scheduling **********
; CHECK-LABEL: ldq_cluster:%bb.0
; CHECK: Cluster ld/st SU(1) - SU(3)
-; CHECK: SU(1): %{{[0-9]+}}<def> = LDRQui
-; CHECK: SU(3): %{{[0-9]+}}<def> = LDRQui
+; CHECK: SU(1): %{{[0-9]+}}:fpr128 = LDRQui
+; CHECK: SU(3): %{{[0-9]+}}:fpr128 = LDRQui
; EXYNOS: ********** MI Scheduling **********
; EXYNOS-LABEL: ldq_cluster:%bb.0
; EXYNOS-NOT: Cluster ld/st
diff --git a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
index b4e07fe76c1..bbb699bbb46 100644
--- a/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
+++ b/test/CodeGen/AArch64/arm64-misched-forwarding-A53.ll
@@ -6,7 +6,7 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: shiftable
-; CHECK: SU(2): %2<def> = SUBXri %1, 20, 0
+; CHECK: SU(2): %2:gpr64common = SUBXri %1, 20, 0
; CHECK: Successors:
; CHECK-NEXT: SU(4): Data Latency=1 Reg=%2
; CHECK-NEXT: SU(3): Data Latency=2 Reg=%2
diff --git a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
index 8c81cf43e68..36de403a0c8 100644
--- a/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
+++ b/test/CodeGen/AArch64/arm64-misched-memdep-bug.ll
@@ -5,7 +5,7 @@
;
; CHECK: ********** MI Scheduling **********
; CHECK: misched_bug:%bb.0 entry
-; CHECK: SU(2): %2<def> = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
+; CHECK: SU(2): %2:gpr32 = LDRWui %0, 1; mem:LD4[%ptr1_plus1] GPR32:%2 GPR64common:%0
; CHECK: Successors:
; CHECK-NEXT: SU(5): Data Latency=4 Reg=%2
; CHECK-NEXT: SU(4): Ord Latency=0
@@ -13,7 +13,7 @@
; CHECK: Successors:
; CHECK: SU(4): Ord Latency=0
; CHECK: SU(4): STRWui %wzr, %1, 0; mem:ST4[%ptr2] GPR64common:%1
-; CHECK: SU(5): %w0<def> = COPY %2; GPR32:%2
+; CHECK: SU(5): %w0 = COPY %2; GPR32:%2
; CHECK: ** ScheduleDAGMI::schedule picking next node
define i32 @misched_bug(i32* %ptr1, i32* %ptr2) {
entry:
diff --git a/test/CodeGen/AArch64/arm64-misched-multimmo.ll b/test/CodeGen/AArch64/arm64-misched-multimmo.ll
index 9d92f96a208..bdd4f49d174 100644
--- a/test/CodeGen/AArch64/arm64-misched-multimmo.ll
+++ b/test/CodeGen/AArch64/arm64-misched-multimmo.ll
@@ -8,7 +8,7 @@
; Check that no scheduling dependencies are created between the paired loads and the store during post-RA MI scheduling.
;
; CHECK-LABEL: # Machine code for function foo:
-; CHECK: SU(2): %w{{[0-9]+}}<def>, %w{{[0-9]+}}<def> = LDPWi
+; CHECK: SU(2): %w{{[0-9]+}}, %w{{[0-9]+}} = LDPWi
; CHECK: Successors:
; CHECK-NOT: ch SU(4)
; CHECK: SU(3)
diff --git a/test/CodeGen/AArch64/loh.mir b/test/CodeGen/AArch64/loh.mir
index 001e7755829..1e42ecfd599 100644
--- a/test/CodeGen/AArch64/loh.mir
+++ b/test/CodeGen/AArch64/loh.mir
@@ -22,14 +22,14 @@ tracksRegLiveness: true
body: |
bb.0:
; CHECK: Adding MCLOH_AdrpAdrp:
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g4>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g4>
; CHECK-NEXT: Adding MCLOH_AdrpAdrp:
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpAdrp:
- ; CHECK-NEXT: %x0<def> = ADRP <ga:@g0>
- ; CHECK-NEXT: %x0<def> = ADRP <ga:@g1>
+ ; CHECK-NEXT: %x0 = ADRP <ga:@g0>
+ ; CHECK-NEXT: %x0 = ADRP <ga:@g1>
%x0 = ADRP target-flags(aarch64-page) @g0
%x0 = ADRP target-flags(aarch64-page) @g1
%x1 = ADRP target-flags(aarch64-page) @g2
@@ -38,11 +38,11 @@ body: |
bb.1:
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %x20<def> = ADRP <ga:@g0>
- ; CHECK-NEXT: %x3<def> = ADDXri %x20, <ga:@g0>
+ ; CHECK-NEXT: %x20 = ADRP <ga:@g0>
+ ; CHECK-NEXT: %x3 = ADDXri %x20, <ga:@g0>
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g0>
- ; CHECK-NEXT: %x1<def> = ADDXri %x1, <ga:@g0>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g0>
+ ; CHECK-NEXT: %x1 = ADDXri %x1, <ga:@g0>
%x1 = ADRP target-flags(aarch64-page) @g0
%x9 = SUBXri undef %x11, 5, 0 ; should not affect MCLOH formation
%x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g0, 0
@@ -73,11 +73,11 @@ body: |
bb.5:
; CHECK-NEXT: Adding MCLOH_AdrpLdr:
- ; CHECK-NEXT: %x5<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %s6<def> = LDRSui %x5, <ga:@g2>
+ ; CHECK-NEXT: %x5 = ADRP <ga:@g2>
+ ; CHECK-NEXT: %s6 = LDRSui %x5, <ga:@g2>
; CHECK-NEXT: Adding MCLOH_AdrpLdr:
- ; CHECK-NEXT: %x4<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %x4<def> = LDRXui %x4, <ga:@g2>
+ ; CHECK-NEXT: %x4 = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x4 = LDRXui %x4, <ga:@g2>
%x4 = ADRP target-flags(aarch64-page) @g2
%x4 = LDRXui %x4, target-flags(aarch64-pageoff) @g2
%x5 = ADRP target-flags(aarch64-page) @g2
@@ -85,11 +85,11 @@ body: |
bb.6:
; CHECK-NEXT: Adding MCLOH_AdrpLdrGot:
- ; CHECK-NEXT: %x5<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %x6<def> = LDRXui %x5, <ga:@g2>
+ ; CHECK-NEXT: %x5 = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x6 = LDRXui %x5, <ga:@g2>
; CHECK-NEXT: Adding MCLOH_AdrpLdrGot:
- ; CHECK-NEXT: %x4<def> = ADRP <ga:@g2>
- ; CHECK-NEXT: %x4<def> = LDRXui %x4, <ga:@g2>
+ ; CHECK-NEXT: %x4 = ADRP <ga:@g2>
+ ; CHECK-NEXT: %x4 = LDRXui %x4, <ga:@g2>
%x4 = ADRP target-flags(aarch64-page, aarch64-got) @g2
%x4 = LDRXui %x4, target-flags(aarch64-pageoff, aarch64-got) @g2
%x5 = ADRP target-flags(aarch64-page, aarch64-got) @g2
@@ -104,23 +104,23 @@ body: |
bb.8:
; CHECK-NEXT: Adding MCLOH_AdrpAddLdr:
- ; CHECK-NEXT: %x7<def> = ADRP <ga:@g3>[TF=1]
- ; CHECK-NEXT: %x8<def> = ADDXri %x7, <ga:@g3>
- ; CHECK-NEXT: %d1<def> = LDRDui %x8, 8
+ ; CHECK-NEXT: %x7 = ADRP <ga:@g3>[TF=1]
+ ; CHECK-NEXT: %x8 = ADDXri %x7, <ga:@g3>
+ ; CHECK-NEXT: %d1 = LDRDui %x8, 8
%x7 = ADRP target-flags(aarch64-page) @g3
%x8 = ADDXri %x7, target-flags(aarch64-pageoff) @g3, 0
%d1 = LDRDui %x8, 8
bb.9:
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %x3<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %x3<def> = ADDXri %x3, <ga:@g3>
+ ; CHECK-NEXT: %x3 = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x3 = ADDXri %x3, <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpAdd:
- ; CHECK-NEXT: %x5<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %x2<def> = ADDXri %x5, <ga:@g3>
+ ; CHECK-NEXT: %x5 = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x2 = ADDXri %x5, <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpAddStr:
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %x1<def> = ADDXri %x1, <ga:@g3>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x1 = ADDXri %x1, <ga:@g3>
; CHECK-NEXT: STRXui %xzr, %x1, 16
%x1 = ADRP target-flags(aarch64-page) @g3
%x1 = ADDXri %x1, target-flags(aarch64-pageoff) @g3, 0
@@ -138,12 +138,12 @@ body: |
bb.10:
; CHECK-NEXT: Adding MCLOH_AdrpLdr:
- ; CHECK-NEXT: %x2<def> = ADRP <ga:@g3>
- ; CHECK-NEXT: %x2<def> = LDRXui %x2, <ga:@g3>
+ ; CHECK-NEXT: %x2 = ADRP <ga:@g3>
+ ; CHECK-NEXT: %x2 = LDRXui %x2, <ga:@g3>
; CHECK-NEXT: Adding MCLOH_AdrpLdrGotLdr:
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g4>
- ; CHECK-NEXT: %x1<def> = LDRXui %x1, <ga:@g4>
- ; CHECK-NEXT: %x1<def> = LDRXui %x1, 24
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g4>
+ ; CHECK-NEXT: %x1 = LDRXui %x1, <ga:@g4>
+ ; CHECK-NEXT: %x1 = LDRXui %x1, 24
%x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
%x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
%x1 = LDRXui %x1, 24
@@ -154,11 +154,11 @@ body: |
bb.11:
; CHECK-NEXT: Adding MCLOH_AdrpLdr
- ; CHECK-NEXT: %x5<def> = ADRP <ga:@g1>
- ; CHECK-NEXT: %x5<def> = LDRXui %x5, <ga:@g1>
+ ; CHECK-NEXT: %x5 = ADRP <ga:@g1>
+ ; CHECK-NEXT: %x5 = LDRXui %x5, <ga:@g1>
; CHECK-NEXT: Adding MCLOH_AdrpLdrGotStr:
- ; CHECK-NEXT: %x1<def> = ADRP <ga:@g4>
- ; CHECK-NEXT: %x1<def> = LDRXui %x1, <ga:@g4>
+ ; CHECK-NEXT: %x1 = ADRP <ga:@g4>
+ ; CHECK-NEXT: %x1 = LDRXui %x1, <ga:@g4>
; CHECK-NEXT: STRXui %xzr, %x1, 32
%x1 = ADRP target-flags(aarch64-page, aarch64-got) @g4
%x1 = LDRXui %x1, target-flags(aarch64-pageoff, aarch64-got) @g4
@@ -171,9 +171,9 @@ body: |
bb.12:
; CHECK-NOT: MCLOH_AdrpAdrp
; CHECK: Adding MCLOH_AdrpAddLdr
- ; %x9<def> = ADRP <ga:@g4>
- ; %x9<def> = ADDXri %x9, <ga:@g4>
- ; %x5<def> = LDRXui %x9, 0
+ ; %x9 = ADRP <ga:@g4>
+ ; %x9 = ADDXri %x9, <ga:@g4>
+ ; %x5 = LDRXui %x9, 0
%x9 = ADRP target-flags(aarch64-page, aarch64-got) @g4
%x9 = ADDXri %x9, target-flags(aarch64-pageoff, aarch64-got) @g4, 0
%x5 = LDRXui %x9, 0
diff --git a/test/CodeGen/AArch64/machine-copy-prop.ll b/test/CodeGen/AArch64/machine-copy-prop.ll
index ed0955ccf48..2ac87f00048 100644
--- a/test/CodeGen/AArch64/machine-copy-prop.ll
+++ b/test/CodeGen/AArch64/machine-copy-prop.ll
@@ -2,18 +2,18 @@
; This file check a bug in MachineCopyPropagation pass. The last COPY will be
; incorrectly removed if the machine instructions are as follows:
-; %q5_q6<def> = COPY %q2_q3
-; %d5<def> =
-; %d3<def> =
-; %d3<def> = COPY %d6
+; %q5_q6 = COPY %q2_q3
+; %d5 =
+; %d3 =
+; %d3 = COPY %d6
; This is caused by a bug in function SourceNoLongerAvailable(), which fails to
-; remove the relationship of D6 and "%q5_q6<def> = COPY %q2_q3".
+; remove the relationship of D6 and "%q5_q6 = COPY %q2_q3".
@failed = internal unnamed_addr global i1 false
; CHECK-LABEL: foo:
; CHECK: ld2
-; CHECK-NOT: // kill: D{{[0-9]+}}<def> D{{[0-9]+}}<kill>
+; CHECK-NOT: // kill: def D{{[0-9]+}} killed D{{[0-9]+}}
define void @foo(<2 x i32> %shuffle251, <8 x i8> %vtbl1.i, i8* %t2, <2 x i32> %vrsubhn_v2.i1364) {
entry:
%val0 = alloca [2 x i64], align 8
diff --git a/test/CodeGen/AArch64/scheduledag-constreg.mir b/test/CodeGen/AArch64/scheduledag-constreg.mir
index 1f97fe1360c..013f59f52a9 100644
--- a/test/CodeGen/AArch64/scheduledag-constreg.mir
+++ b/test/CodeGen/AArch64/scheduledag-constreg.mir
@@ -7,16 +7,16 @@
# Check that the instructions are not dependent on each other, even though
# they all read/write to the zero register.
# CHECK-LABEL: MI Scheduling
-# CHECK: SU(0): %wzr<def,dead> = SUBSWri %w1, 0, 0, %nzcv<imp-def,dead>
+# CHECK: SU(0): dead %wzr = SUBSWri %w1, 0, 0, implicit-def dead %nzcv
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
-# CHECK: SU(1): %w2<def> = COPY %wzr
+# CHECK: SU(1): %w2 = COPY %wzr
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
-# CHECK: SU(2): %wzr<def,dead> = SUBSWri %w3, 0, 0, %nzcv<imp-def,dead>
+# CHECK: SU(2): dead %wzr = SUBSWri %w3, 0, 0, implicit-def dead %nzcv
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
-# CHECK: SU(3): %w4<def> = COPY %wzr
+# CHECK: SU(3): %w4 = COPY %wzr
# CHECK: # succs left : 0
# CHECK-NOT: Successors:
name: func
diff --git a/test/CodeGen/AArch64/tailcall_misched_graph.ll b/test/CodeGen/AArch64/tailcall_misched_graph.ll
index cb42fcced8d..860853a0675 100644
--- a/test/CodeGen/AArch64/tailcall_misched_graph.ll
+++ b/test/CodeGen/AArch64/tailcall_misched_graph.ll
@@ -26,15 +26,15 @@ declare void @callee2(i8*, i8*, i8*, i8*, i8*,
; CHECK: fi#-2: {{.*}} fixed, at location [SP+8]
; CHECK: fi#-1: {{.*}} fixed, at location [SP]
-; CHECK: [[VRA:%.*]]<def> = LDRXui <fi#-1>
-; CHECK: [[VRB:%.*]]<def> = LDRXui <fi#-2>
+; CHECK: [[VRA:%.*]]:gpr64 = LDRXui <fi#-1>
+; CHECK: [[VRB:%.*]]:gpr64 = LDRXui <fi#-2>
; CHECK: STRXui %{{.*}}, <fi#-4>
; CHECK: STRXui [[VRB]], <fi#-3>
; Make sure that there is an dependence edge between fi#-2 and fi#-4.
; Without this edge the scheduler would be free to move the store accross the load.
-; CHECK: SU({{.*}}): [[VRB]]<def> = LDRXui <fi#-2>
+; CHECK: SU({{.*}}): [[VRB]]:gpr64 = LDRXui <fi#-2>
; CHECK-NOT: SU
; CHECK: Successors:
; CHECK: SU([[DEPSTOREB:.*]]): Ord Latency=0
diff --git a/test/CodeGen/AMDGPU/llvm.dbg.value.ll b/test/CodeGen/AMDGPU/llvm.dbg.value.ll
index d0917e29495..ace859c9575 100644
--- a/test/CodeGen/AMDGPU/llvm.dbg.value.ll
+++ b/test/CodeGen/AMDGPU/llvm.dbg.value.ll
@@ -5,7 +5,7 @@
; NOOPT: s_load_dwordx2 s[4:5]
; FIXME: Why is the SGPR4_SGPR5 reference being removed from DBG_VALUE?
-; NOOPT: ; kill: %sgpr8_sgpr9<def> %sgpr4_sgpr5<kill>
+; NOOPT: ; kill: def %sgpr8_sgpr9 killed %sgpr4_sgpr5
; NOOPT-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- undef
; GCN: flat_store_dword
diff --git a/test/CodeGen/AMDGPU/schedule-regpressure.mir b/test/CodeGen/AMDGPU/schedule-regpressure.mir
index 9d798a50c08..afc2fab08f8 100644
--- a/test/CodeGen/AMDGPU/schedule-regpressure.mir
+++ b/test/CodeGen/AMDGPU/schedule-regpressure.mir
@@ -4,7 +4,7 @@
# Check there is no SReg_32 pressure created by DS_* instructions because of M0 use
# CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} %m0<imp-use>, %exec<imp-use>
+# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit %m0, implicit %exec
# CHECK: Pressure Diff : {{$}}
# CHECK: SU({{.*}} DS_WRITE_B32
diff --git a/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll b/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
index 130221d38c2..c0b94134bec 100644
--- a/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
+++ b/test/CodeGen/ARM/2010-06-29-PartialRedefFastAlloc.ll
@@ -4,8 +4,8 @@ target triple = "thumbv7-apple-darwin10"
; This tests the fast register allocator's handling of partial redefines:
;
-; %reg1028:dsub_0<def>, %reg1028:dsub_1<def> = VLD1q64 %reg1025...
-; %reg1030:dsub_1<def> = COPY %reg1028:dsub_0<kill>
+; %reg1028:dsub_0, %reg1028:dsub_1 = VLD1q64 %reg1025...
+; %reg1030:dsub_1 = COPY killed %reg1028:dsub_0
;
; %reg1028 gets allocated %Q0, and if %reg1030 is reloaded for the partial
; redef, it cannot also get %Q0.
diff --git a/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll b/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
index 5e71eeb9c3d..bf8ddf46d58 100644
--- a/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
+++ b/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
@@ -9,7 +9,7 @@ target triple = "thumbv7-apple-ios"
;
; The early-clobber instruction is an str:
;
-; %12<earlyclobber,def> = t2STR_PRE %6, %12, 32, pred:14, pred:%noreg
+; early-clobber %12 = t2STR_PRE %6, %12, 32, pred:14, pred:%noreg
;
; This tests that shrinkToUses handles the EC redef correctly.
diff --git a/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll b/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
index bb6f83c12fa..7a582b16ec9 100644
--- a/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
+++ b/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
@@ -4,7 +4,7 @@
define void @vst(i8* %m, [4 x i64] %v) {
entry:
; CHECK: vst:
-; CHECK: VST1d64Q %r{{[0-9]+}}<kill>, 8, %d{{[0-9]+}}, pred:14, pred:%noreg, %q{{[0-9]+}}_q{{[0-9]+}}<imp-use,kill>
+; CHECK: VST1d64Q killed %r{{[0-9]+}}, 8, %d{{[0-9]+}}, pred:14, pred:%noreg, implicit killed %q{{[0-9]+}}_q{{[0-9]+}}
%v0 = extractvalue [4 x i64] %v, 0
%v1 = extractvalue [4 x i64] %v, 1
@@ -37,7 +37,7 @@ entry:
%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
; CHECK: vtbx4:
-; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, %q{{[0-9]+}}_q{{[0-9]+}}<imp-use>
+; CHECK: VTBX4 {{.*}}, pred:14, pred:%noreg, implicit %q{{[0-9]+}}_q{{[0-9]+}}
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load %struct.__neon_int8x8x4_t, %struct.__neon_int8x8x4_t* %B
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
diff --git a/test/CodeGen/ARM/Windows/vla-cpsr.ll b/test/CodeGen/ARM/Windows/vla-cpsr.ll
index 1eb2797ca2e..39527425a76 100644
--- a/test/CodeGen/ARM/Windows/vla-cpsr.ll
+++ b/test/CodeGen/ARM/Windows/vla-cpsr.ll
@@ -9,5 +9,5 @@ entry:
ret void
}
-; CHECK: tBL pred:14, pred:%noreg, <es:__chkstk>, %lr<imp-def>, %sp<imp-use>, %r4<imp-use,kill>, %r4<imp-def>, %r12<imp-def,dead>, %cpsr<imp-def,dead>
+; CHECK: tBL pred:14, pred:%noreg, <es:__chkstk>, implicit-def %lr, implicit %sp, implicit killed %r4, implicit-def %r4, implicit-def dead %r12, implicit-def dead %cpsr
diff --git a/test/CodeGen/ARM/crash-greedy.ll b/test/CodeGen/ARM/crash-greedy.ll
index 31d6079db71..5320a163c0b 100644
--- a/test/CodeGen/ARM/crash-greedy.ll
+++ b/test/CodeGen/ARM/crash-greedy.ll
@@ -61,7 +61,7 @@ for.end: ; preds = %cond.end
; CHECK: insert_elem
; This test has a sub-register copy with a kill flag:
-; %6:ssub_3<def> = COPY %6:ssub_2<kill>; QPR_VFP2:%6
+; %6:ssub_3 = COPY killed %6:ssub_2; QPR_VFP2:%6
; The rewriter must do something sensible with that, or the scavenger crashes.
define void @insert_elem() nounwind {
entry:
diff --git a/test/CodeGen/ARM/ifcvt-dead-def.ll b/test/CodeGen/ARM/ifcvt-dead-def.ll
index 77a3f5c0961..fedbcfb09eb 100644
--- a/test/CodeGen/ARM/ifcvt-dead-def.ll
+++ b/test/CodeGen/ARM/ifcvt-dead-def.ll
@@ -8,7 +8,7 @@ target triple = "thumbv7-unknown-unknown"
%struct.gs_color_s = type { i16, i16, i16, i16, i8, i8 }
; In this case, the if converter was cloning the return instruction so that we had
-; r2<def> = ...
+; r2 = ...
; return [pred] r2<dead,def>
; ldr <r2, kill>
; return
diff --git a/test/CodeGen/ARM/misched-copy-arm.ll b/test/CodeGen/ARM/misched-copy-arm.ll
index ae0b127a6f8..dbed4650c39 100644
--- a/test/CodeGen/ARM/misched-copy-arm.ll
+++ b/test/CodeGen/ARM/misched-copy-arm.ll
@@ -33,9 +33,9 @@ for.end: ; preds = %for.body, %entry
; This case was a crasher in constrainLocalCopy.
; The problem was the t2LDR_PRE defining both the global and local lrg.
; CHECK-LABEL: *** Final schedule for %bb.5 ***
-; CHECK: %[[R4:[0-9]+]]<def>, %[[R1:[0-9]+]]<def,tied2> = t2LDR_PRE %[[R1]]<tied1>
-; CHECK: %{{[0-9]+}}<def> = COPY %[[R1]]
-; CHECK: %{{[0-9]+}}<def> = COPY %[[R4]]
+; CHECK: %[[R4:[0-9]+]]:gpr, %[[R1:[0-9]+]]:gpr = t2LDR_PRE %[[R1]]
+; CHECK: %{{[0-9]+}}:gpr = COPY %[[R1]]
+; CHECK: %{{[0-9]+}}:gpr = COPY %[[R4]]
; CHECK-LABEL: MACHINEINSTRS
%struct.rtx_def = type { [4 x i8], [1 x %union.rtunion_def] }
%union.rtunion_def = type { i64 }
diff --git a/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
index 4048f1122d1..6f813d4f860 100644
--- a/test/CodeGen/ARM/misched-int-basic-thumb2.mir
+++ b/test/CodeGen/ARM/misched-int-basic-thumb2.mir
@@ -37,22 +37,22 @@
}
#
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %2<def> = t2MOVi32imm <ga:@g1>; rGPR:%2
+# CHECK: SU(2): %2:rgpr = t2MOVi32imm <ga:@g1>; rGPR:%2
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 2
# CHECK_R52: Latency : 2
#
-# CHECK: SU(3): %3<def> = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%3,%2
+# CHECK: SU(3): %3:rgpr = t2LDRi12 %2, 0, pred:14, pred:%noreg; mem:LD4[@g1](dereferenceable) rGPR:%3,%2
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 3
# CHECK_R52: Latency : 4
#
-# CHECK : SU(6): %6<def> = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg; rGPR:%6,%3,%3
+# CHECK : SU(6): %6 = t2ADDrr %3, %3, pred:14, pred:%noreg, opt:%noreg; rGPR:%6,%3,%3
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
-# CHECK: SU(7): %7<def> = t2SDIV %6, %5, pred:14, pred:%noreg; rGPR:%7,%6,%5
+# CHECK: SU(7): %7:rgpr = t2SDIV %6, %5, pred:14, pred:%noreg; rGPR:%7,%6,%5
# CHECK_A9: Latency : 0
# CHECK_SWIFT: Latency : 14
# CHECK_R52: Latency : 8
@@ -62,37 +62,37 @@
# CHECK_SWIFT: Latency : 0
# CHECK_R52: Latency : 4
#
-# CHECK: SU(9): %8<def> = t2SMULBB %1, %1, pred:14, pred:%noreg; rGPR:%8,%1,%1
+# CHECK: SU(9): %8:rgpr = t2SMULBB %1, %1, pred:14, pred:%noreg; rGPR:%8,%1,%1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(10): %9<def> = t2SMLABB %0, %0, %8, pred:14, pred:%noreg; rGPR:%9,%0,%0,%8
+# CHECK: SU(10): %9:rgpr = t2SMLABB %0, %0, %8, pred:14, pred:%noreg; rGPR:%9,%0,%0,%8
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %10<def> = t2UXTH %9, 0, pred:14, pred:%noreg; rGPR:%10,%9
+# CHECK: SU(11): %10:rgpr = t2UXTH %9, 0, pred:14, pred:%noreg; rGPR:%10,%9
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(12): %11<def> = t2MUL %10, %7, pred:14, pred:%noreg; rGPR:%11,%10,%7
+# CHECK: SU(12): %11:rgpr = t2MUL %10, %7, pred:14, pred:%noreg; rGPR:%11,%10,%7
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(13): %12<def> = t2MLA %11, %11, %11, pred:14, pred:%noreg; rGPR:%12,%11,%11,%11
+# CHECK: SU(13): %12:rgpr = t2MLA %11, %11, %11, pred:14, pred:%noreg; rGPR:%12,%11,%11,%11
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(14): %13<def>, %14<def> = t2UMULL %12, %12, pred:14, pred:%noreg; rGPR:%13,%14,%12,%12
+# CHECK: SU(14): %13:rgpr, %14:rgpr = t2UMULL %12, %12, pred:14, pred:%noreg; rGPR:%13,%14,%12,%12
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(18): %19<def,tied4>, %20<def,tied5> = t2UMLAL %12, %12, %19<tied0>, %20<tied1>, pred:14, pred:%noreg; rGPR:%19,%20,%12,%12,%20
+# CHECK: SU(18): %19:rgpr, %20:rgpr = t2UMLAL %12, %12, %19, %20, pred:14, pred:%noreg; rGPR:%19,%20,%12,%12,%20
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 7
# CHECK_R52: Latency : 4
diff --git a/test/CodeGen/ARM/misched-int-basic.mir b/test/CodeGen/ARM/misched-int-basic.mir
index 0cad54d975e..9641348792a 100644
--- a/test/CodeGen/ARM/misched-int-basic.mir
+++ b/test/CodeGen/ARM/misched-int-basic.mir
@@ -28,37 +28,37 @@
}
# CHECK: ********** MI Scheduling **********
-# CHECK: SU(2): %2<def> = SMULBB %1, %1, pred:14, pred:%noreg; GPR:%2,%1,%1
+# CHECK: SU(2): %2:gpr = SMULBB %1, %1, pred:14, pred:%noreg; GPR:%2,%1,%1
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(3): %3<def> = SMLABB %0, %0, %2, pred:14, pred:%noreg; GPRnopc:%3,%0,%0 GPR:%2
+# CHECK: SU(3): %3:gprnopc = SMLABB %0, %0, %2, pred:14, pred:%noreg; GPRnopc:%3,%0,%0 GPR:%2
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(4): %4<def> = UXTH %3, 0, pred:14, pred:%noreg; GPRnopc:%4,%3
+# CHECK: SU(4): %4:gprnopc = UXTH %3, 0, pred:14, pred:%noreg; GPRnopc:%4,%3
# CHECK_A9: Latency : 1
# CHECK_SWIFT: Latency : 1
# CHECK_R52: Latency : 3
#
-# CHECK: SU(5): %5<def> = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%5,%4,%4
+# CHECK: SU(5): %5:gprnopc = MUL %4, %4, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%5,%4,%4
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(6): %6<def> = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%6,%5,%5,%5
+# CHECK: SU(6): %6:gprnopc = MLA %5, %5, %5, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%6,%5,%5,%5
# CHECK_A9: Latency : 2
# CHECK_SWIFT: Latency : 4
# CHECK_R52: Latency : 4
#
-# CHECK: SU(7): %7<def>, %8<def> = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%7,%8,%6,%6
+# CHECK: SU(7): %7:gprnopc, %8:gprnopc = UMULL %6, %6, pred:14, pred:%noreg, opt:%noreg; GPRnopc:%7,%8,%6,%6
# CHECK_A9: Latency : 3
# CHECK_SWIFT: Latency : 5
# CHECK_R52: Latency : 4
#
-# CHECK: SU(11): %13<def,tied4>, %14<def,tied5> = UMLAL %6, %6, %13<tied0>, %14<tied1>, pred:14, pred:%noreg, opt:%noreg; GPR:%13 GPRnopc:%14,%6,%6
+# CHECK: SU(11): %13:gpr, %14:gprnopc = UMLAL %6, %6, %13, %14, pred:14, pred:%noreg, opt:%noreg; GPR:%13 GPRnopc:%14,%6,%6
# CHECK_SWIFT: Latency : 7
# CHECK_A9: Latency : 3
# CHECK_R52: Latency : 4
diff --git a/test/CodeGen/ARM/sched-it-debug-nodes.mir b/test/CodeGen/ARM/sched-it-debug-nodes.mir
index d88a57dccca..c09c2db7ef7 100644
--- a/test/CodeGen/ARM/sched-it-debug-nodes.mir
+++ b/test/CodeGen/ARM/sched-it-debug-nodes.mir
@@ -32,9 +32,9 @@
; debug value as KILL'ed, resulting in a DEBUG_VALUE node changing codegen! (or
; hopefully, triggering an assert).
- ; CHECK: BUNDLE %itstate<imp-def,dead>
- ; CHECK: * DBG_VALUE %r1, %noreg, !"u"
- ; CHECK-NOT: * DBG_VALUE %r1<kill>, %noreg, !"u"
+ ; CHECK: BUNDLE implicit-def dead %itstate
+ ; CHECK: * DBG_VALUE debug-use %r1, debug-use %noreg, !"u"
+ ; CHECK-NOT: * DBG_VALUE killed %r1, %noreg, !"u"
declare arm_aapcscc void @g(%struct.s*, i8*, i32) #1
diff --git a/test/CodeGen/ARM/single-issue-r52.mir b/test/CodeGen/ARM/single-issue-r52.mir
index b9857a18299..379f93384a2 100644
--- a/test/CodeGen/ARM/single-issue-r52.mir
+++ b/test/CodeGen/ARM/single-issue-r52.mir
@@ -20,22 +20,22 @@
# CHECK: ********** MI Scheduling **********
# CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU(1): %1<def> = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%1 GPR:%0
+# CHECK: SU(1): %1:qqpr = VLD4d8Pseudo %0, 8, pred:14, pred:%noreg; mem:LD32[%A](align=8) QQPR:%1 GPR:%0
# CHECK: Latency : 8
# CHECK: Single Issue : true;
-# CHECK: SU(2): %4<def> = VADDv8i8 %1:dsub_0, %1:dsub_1, pred:14, pred:%noreg; DPR:%4 QQPR:%1
+# CHECK: SU(2): %4:dpr = VADDv8i8 %1.dsub_0, %1.dsub_1, pred:14, pred:%noreg; DPR:%4 QQPR:%1
# CHECK: Latency : 5
# CHECK: Single Issue : false;
-# CHECK: SU(3): %5<def>, %6<def> = VMOVRRD %4, pred:14, pred:%noreg; GPR:%5,%6 DPR:%4
+# CHECK: SU(3): %5:gpr, %6:gpr = VMOVRRD %4, pred:14, pred:%noreg; GPR:%5,%6 DPR:%4
# CHECK: Latency : 4
# CHECK: Single Issue : false;
-# TOPDOWN: Scheduling SU(1) %1<def> = VLD4d8Pseudo
+# TOPDOWN: Scheduling SU(1) %1:qqpr = VLD4d8Pseudo
# TOPDOWN: Bump cycle to end group
-# TOPDOWN: Scheduling SU(2) %4<def> = VADDv8i8
+# TOPDOWN: Scheduling SU(2) %4:dpr = VADDv8i8
-# BOTTOMUP: Scheduling SU(2) %4<def> = VADDv8i8
-# BOTTOMUP: Scheduling SU(1) %1<def> = VLD4d8Pseudo
+# BOTTOMUP: Scheduling SU(2) %4:dpr = VADDv8i8
+# BOTTOMUP: Scheduling SU(1) %1:qqpr = VLD4d8Pseudo
# BOTTOMUP: Bump cycle to begin group
...
diff --git a/test/CodeGen/ARM/subreg-remat.ll b/test/CodeGen/ARM/subreg-remat.ll
index 616ab1ef7cd..64d79413413 100644
--- a/test/CodeGen/ARM/subreg-remat.ll
+++ b/test/CodeGen/ARM/subreg-remat.ll
@@ -4,8 +4,8 @@ target triple = "thumbv7-apple-ios"
;
; The vector %v2 is built like this:
;
-; %6:ssub_1<def> = ...
-; %6:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
+; %6:ssub_1 = ...
+; %6:ssub_0 = VLDRS <cp#0>, 0, pred:14, pred:%noreg; mem:LD4[ConstantPool] DPR_VFP2:%6
;
; When %6 spills, the VLDRS constant pool load cannot be rematerialized
; since it implicitly reads the ssub_1 sub-register.
@@ -31,7 +31,7 @@ define void @f1(float %x, <2 x float>* %p) {
; because the bits are undef, we should rematerialize. The vector is now built
; like this:
;
-; %2:ssub_0<def> = VLDRS <cp#0>, 0, pred:14, pred:%noreg, %2<imp-def>; mem:LD4[ConstantPool]
+; %2:ssub_0 = VLDRS <cp#0>, 0, pred:14, pred:%noreg, implicit-def %2; mem:LD4[ConstantPool]
;
; The extra <imp-def> operand indicates that the instruction fully defines the
; virtual register. It doesn't read the old value.
diff --git a/test/CodeGen/ARM/vldm-liveness.mir b/test/CodeGen/ARM/vldm-liveness.mir
index 2056be4f008..c06342c687d 100644
--- a/test/CodeGen/ARM/vldm-liveness.mir
+++ b/test/CodeGen/ARM/vldm-liveness.mir
@@ -1,9 +1,9 @@
# RUN: llc -run-pass arm-ldst-opt -verify-machineinstrs %s -o - | FileCheck %s
# ARM load store optimizer was dealing with a sequence like:
-# s1 = VLDRS [r0, 1], Q0<imp-def>
-# s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def>
-# s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def>
-# s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def>
+# s1 = VLDRS [r0, 1], implicit-def Q0
+# s3 = VLDRS [r0, 2], implicit killed Q0, implicit-def Q0
+# s0 = VLDRS [r0, 0], implicit killed Q0, implicit-def Q0
+# s2 = VLDRS [r0, 4], implicit killed Q0, implicit-def Q0
#
# It decided to combine the {s0, s1} loads into a single instruction in the
# third position. However, this leaves the instruction defining s3 with a stray
diff --git a/test/CodeGen/AVR/select-must-add-unconditional-jump.ll b/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
index 5c247f6e8e6..a0116d6f8a6 100644
--- a/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
+++ b/test/CodeGen/AVR/select-must-add-unconditional-jump.ll
@@ -11,11 +11,11 @@
;
; %bb.2: derived from LLVM BB %finish
; Predecessors according to CFG: %bb.0 %bb.1
-; %0<def> = PHI %3, <%bb.0>, %5, <%bb.1>
-; %7<def> = LDIRdK 2
-; %8<def> = LDIRdK 1
-; CPRdRr %2, %0, %SREG<imp-def>
-; BREQk <%bb.6>, %SREG<imp-use>
+; %0 = PHI %3, <%bb.0>, %5, <%bb.1>
+; %7 = LDIRdK 2
+; %8 = LDIRdK 1
+; CPRdRr %2, %0, implicit-def %SREG
+; BREQk <%bb.6>, implicit %SREG
; Successors according to CFG: %bb.5(?%) %bb.6(?%)
;
; The code assumes it the fallthrough block after this is %bb.5, but
diff --git a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
index c685a0c2740..8dbc7d904c1 100644
--- a/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
+++ b/test/CodeGen/Hexagon/branch-folder-hoist-kills.mir
@@ -6,19 +6,19 @@
# that were no longer live-in.
# This could cause if-converter to generate incorrect code.
#
-# In this testcase, the "r1 = A2_sxth r0<kill>" was hoisted, and since r0
+# In this testcase, the "r1 = A2_sxth killed r0" was hoisted, and since r0
# was killed, it was no longer live-in in either successor. The if-converter
# then created code, where the first predicated instruction has incorrect
# implicit use of r0:
#
# %bb.0:
# Live Ins: %R0
-# %R1<def> = A2_sxth %R0<kill> ; hoisted, kills r0
-# A2_nop %P0<imp-def>
-# %R0<def> = C2_cmoveit %P0, 2, %R0<imp-use> ; predicated A2_tfrsi
-# %R0<def> = C2_cmoveif killed %P0, 1, %R0<imp-use> ; predicated A2_tfrsi
-# %R0<def> = A2_add %R0<kill>, %R1<kill>
-# J2_jumpr %R31, %PC<imp-def,dead>
+# %R1 = A2_sxth killed %R0 ; hoisted, kills r0
+# A2_nop implicit-def %P0
+# %R0 = C2_cmoveit %P0, 2, implicit %R0 ; predicated A2_tfrsi
+# %R0 = C2_cmoveif killed %P0, 1, implicit %R0 ; predicated A2_tfrsi
+# %R0 = A2_add killed %R0, killed %R1
+# J2_jumpr %R31, implicit dead %PC
#
# CHECK: %r1 = A2_sxth killed %r0
diff --git a/test/CodeGen/Hexagon/post-inc-aa-metadata.ll b/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
index 688a71352cd..9357aa7d5a8 100644
--- a/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
+++ b/test/CodeGen/Hexagon/post-inc-aa-metadata.ll
@@ -3,7 +3,7 @@
; Check that the generated post-increment load has TBAA information.
; CHECK-LABEL: Machine code for function fred:
-; CHECK: = V6_vL32b_pi %{{[0-9]+}}<tied1>, 64; mem:LD64[{{.*}}](tbaa=
+; CHECK: = V6_vL32b_pi %{{[0-9]+}}, 64; mem:LD64[{{.*}}](tbaa=
target triple = "hexagon"
diff --git a/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll b/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
index 6279a2ea6a7..2ff11e65045 100644
--- a/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
+++ b/test/CodeGen/Lanai/lanai-misched-trivial-disjoint.ll
@@ -36,7 +36,7 @@ entry:
; CHECK-LABEL: SU({{.*}}): SW_RI{{.*}}, 4,
; CHECK: # preds left : 2
; CHECK: # succs left : 0
-; CHECK-LABEL: SU({{.*}}): %{{.*}}<def> = LDW_RI{{.*}}, 12,
+; CHECK-LABEL: SU({{.*}}): %{{.*}} = LDW_RI{{.*}}, 12,
; CHECK: # preds left : 1
; CHECK: # succs left : 4
; CHECK-LABEL: SU({{.*}}): STH_RI{{.*}}, 10,
diff --git a/test/CodeGen/PowerPC/byval-agg-info.ll b/test/CodeGen/PowerPC/byval-agg-info.ll
index 04869665797..141edb57967 100644
--- a/test/CodeGen/PowerPC/byval-agg-info.ll
+++ b/test/CodeGen/PowerPC/byval-agg-info.ll
@@ -13,5 +13,5 @@ entry:
; Make sure that the MMO on the store has no offset from the byval
; variable itself (we used to have mem:ST8[%v+64]).
-; CHECK: STD %x5<kill>, 176, %x1; mem:ST8[%v](align=16)
+; CHECK: STD killed %x5, 176, %x1; mem:ST8[%v](align=16)
diff --git a/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll b/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
index 4ca75a7e365..c9c9f6a8958 100644
--- a/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
+++ b/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
@@ -7,11 +7,11 @@ define signext i32 @fn1(i32 %baz) {
%2 = zext i32 %1 to i64
%3 = shl i64 %2, 48
%4 = ashr exact i64 %3, 48
-; CHECK: ANDIo8 {{[^,]+}}, 65520, %cr0<imp-def,dead>;
+; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead %cr0;
; CHECK: CMPLDI
; CHECK: BCC
-; CHECK: ANDIo8 {{[^,]+}}, 65520, %cr0<imp-def>;
+; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def %cr0;
; CHECK: COPY %cr0
; CHECK: BCC
%5 = icmp eq i64 %4, 0
@@ -26,9 +26,9 @@ bar:
; CHECK-LABEL: fn2
define signext i32 @fn2(i64 %a, i64 %b) {
-; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, %cr0<imp-def>;
-; CHECK: [[CREG:[^, ]+]]<def> = COPY %cr0
-; CHECK: BCC 12, [[CREG]]<kill>
+; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def %cr0;
+; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed %cr0
+; CHECK: BCC 12, killed [[CREG]]
%1 = or i64 %b, %a
%2 = icmp sgt i64 %1, -1
br i1 %2, label %foo, label %bar
@@ -42,9 +42,9 @@ bar:
; CHECK-LABEL: fn3
define signext i32 @fn3(i32 %a) {
-; CHECK: ANDIo {{[^, ]+}}, 10, %cr0<imp-def>;
-; CHECK: [[CREG:[^, ]+]]<def> = COPY %cr0
-; CHECK: BCC 76, [[CREG]]<kill>
+; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def %cr0;
+; CHECK: [[CREG:[^, ]+]]:crrc = COPY %cr0
+; CHECK: BCC 76, killed [[CREG]]
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
br i1 %2, label %foo, label %bar
diff --git a/test/CodeGen/PowerPC/quadint-return.ll b/test/CodeGen/PowerPC/quadint-return.ll
index e9681071bf2..b8d982cd366 100644
--- a/test/CodeGen/PowerPC/quadint-return.ll
+++ b/test/CodeGen/PowerPC/quadint-return.ll
@@ -14,6 +14,6 @@ entry:
; CHECK: ********** Function: foo
; CHECK: ********** FAST REGISTER ALLOCATION **********
-; CHECK: %x3<def> = COPY %{{[0-9]+}}
-; CHECK-NEXT: %x4<def> = COPY %{{[0-9]+}}
+; CHECK: %x3 = COPY %{{[0-9]+}}
+; CHECK-NEXT: %x4 = COPY %{{[0-9]+}}
; CHECK-NEXT: BLR
diff --git a/test/CodeGen/SystemZ/pr32505.ll b/test/CodeGen/SystemZ/pr32505.ll
index 288d0b83863..c164592b509 100644
--- a/test/CodeGen/SystemZ/pr32505.ll
+++ b/test/CodeGen/SystemZ/pr32505.ll
@@ -10,8 +10,8 @@ define <2 x float> @pr32505(<2 x i8> * %a) {
; CHECK-NEXT: lbh %r1, 0(%r2)
; CHECK-NEXT: ldgr %f0, %r1
; CHECK-NEXT: ldgr %f2, %r0
-; CHECK-NEXT: # kill: %f0s<def> %f0s<kill> %f0d<kill>
-; CHECK-NEXT: # kill: %f2s<def> %f2s<kill> %f2d<kill>
+; CHECK-NEXT: # kill: def %f0s killed %f0s killed %f0d
+; CHECK-NEXT: # kill: def %f2s killed %f2s killed %f2d
; CHECK-NEXT: br %r14
%L17 = load <2 x i8>, <2 x i8>* %a
%Se21 = sext <2 x i8> %L17 to <2 x i32>
diff --git a/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir b/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
index 2af8b3cce6f..8bf2b5575e8 100644
--- a/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
+++ b/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
@@ -21,7 +21,7 @@ registers:
# CHECK-NEXT: %r0l = COPY %r1l
# Although R0L partially redefines R0Q, it must not mark R0Q as kill
# because R1D is still live through that instruction.
-# CHECK-NOT: %r0q<imp-use,kill>
+# CHECK-NOT: implicit killed %r0q
# CHECK-NEXT: %r2d = COPY %r1d
# CHECK-NEXT: LARL
body: |
diff --git a/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll b/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
index 24a995a1153..f68908728f7 100644
--- a/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
+++ b/test/CodeGen/Thumb2/2010-06-14-NEONCoalescer.ll
@@ -5,8 +5,8 @@ target triple = "thumbv7-apple-darwin10"
; This is a case where the coalescer was too eager. These two copies were
; considered equivalent and coalescable:
;
-; 140 %reg1038:dsub_0<def> = VMOVD %reg1047:dsub_0, pred:14, pred:%reg0
-; 148 %reg1038:dsub_1<def> = VMOVD %reg1047:dsub_0, pred:14, pred:%reg0
+; 140 %reg1038:dsub_0 = VMOVD %reg1047:dsub_0, pred:14, pred:%reg0
+; 148 %reg1038:dsub_1 = VMOVD %reg1047:dsub_0, pred:14, pred:%reg0
;
; Only one can be coalesced.
diff --git a/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll b/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
index 193f8cfcd52..11ac376a893 100644
--- a/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
+++ b/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
@@ -4,7 +4,7 @@
; Tricky coalescer bug:
; After coalescing %RAX with a virtual register, this instruction was rematted:
;
-; %EAX<def> = MOV32rr %reg1070<kill>
+; %EAX = MOV32rr killed %reg1070
;
; This instruction silently defined %RAX, and when rematting removed the
; instruction, the live interval for %RAX was not properly updated. The valno
@@ -12,7 +12,7 @@
;
; The fix is to implicitly define %RAX when coalescing:
;
-; %EAX<def> = MOV32rr %reg1070<kill>, %RAX<imp-def>
+; %EAX = MOV32rr killed %reg1070, implicit-def %RAX
;
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll b/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
index 6fe31b6d167..19182ab2cb5 100644
--- a/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
+++ b/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
@@ -3,7 +3,7 @@
;
; This test produces a move instruction with an implicitly defined super-register:
;
-; %DL<def> = MOV8rr %reg1038<kill>, %RDX<imp-def>
+; %DL = MOV8rr killed %reg1038, implicit-def %RDX
;
; When %DL is rematerialized, we must remember to update live intervals for
; sub-registers %DX and %EDX.
diff --git a/test/CodeGen/X86/2010-04-08-CoalescerBug.ll b/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
index 3dfc8cdbbb8..c3dfbfc15ec 100644
--- a/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
+++ b/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
@@ -2,8 +2,8 @@
; rdar://7842028
; Do not delete partially dead copy instructions.
-; %rdi<def,dead> = MOV64rr %rax<kill>, %edi<imp-def>
-; REP_MOVSD %ecx<imp-def,dead>, %edi<imp-def,dead>, %esi<imp-def,dead>, %ecx<imp-use,kill>, %edi<imp-use,kill>, %esi<imp-use,kill>
+; dead %rdi = MOV64rr killed %rax, implicit-def %edi
+; REP_MOVSD implicit dead %ecx, implicit dead %edi, implicit dead %esi, implicit killed %ecx, implicit killed %edi, implicit killed %esi
%struct.F = type { %struct.FC*, i32, i32, i8, i32, i32, i32 }
diff --git a/test/CodeGen/X86/2010-05-12-FastAllocKills.ll b/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
index c564b72e397..39031806a9a 100644
--- a/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
+++ b/test/CodeGen/X86/2010-05-12-FastAllocKills.ll
@@ -5,24 +5,24 @@ target triple = "x86_64-apple-darwin"
; This test causes a virtual FP register to be redefined while it is live:
;%bb.5: derived from LLVM BB %bb10
; Predecessors according to CFG: %bb.4 %bb.5
-; %reg1024<def> = MOV_Fp8080 %reg1034
-; %reg1025<def> = MUL_Fp80m32 %reg1024, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
-; %reg1034<def> = MOV_Fp8080 %reg1025
-; FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
+; %reg1024 = MOV_Fp8080 %reg1034
+; %reg1025 = MUL_Fp80m32 %reg1024, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
+; %reg1034 = MOV_Fp8080 %reg1025
+; FP_REG_KILL implicit-def %fp0, implicit-def %fp1, implicit-def %fp2, implicit-def %fp3, implicit-def %fp4, implicit-def %fp5, implicit-def %fp6
; JMP_4 <%bb.5>
; Successors according to CFG: %bb.5
;
; The X86FP pass needs good kill flags, like on %fp0 representing %reg1034:
;%bb.5: derived from LLVM BB %bb10
; Predecessors according to CFG: %bb.4 %bb.5
-; %fp0<def> = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
-; %fp1<def> = MOV_Fp8080 %fp0<kill>
-; %fp2<def> = MUL_Fp80m32 %fp1, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
-; %fp0<def> = MOV_Fp8080 %fp2
-; ST_FpP80m <fi#3>, 1, %reg0, 0, %reg0, %fp0<kill>; mem:ST10[FixedStack3](align=4)
-; ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, %fp1<kill>; mem:ST10[FixedStack4](align=4)
-; ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, %fp2<kill>; mem:ST10[FixedStack5](align=4)
-; FP_REG_KILL %fp0<imp-def>, %fp1<imp-def>, %fp2<imp-def>, %fp3<imp-def>, %fp4<imp-def>, %fp5<imp-def>, %fp6<imp-def>
+; %fp0 = LD_Fp80m <fi#3>, 1, %reg0, 0, %reg0; mem:LD10[FixedStack3](align=4)
+; %fp1 = MOV_Fp8080 killed %fp0
+; %fp2 = MUL_Fp80m32 %fp1, %rip, 1, %reg0, <cp#0>, %reg0; mem:LD4[ConstantPool]
+; %fp0 = MOV_Fp8080 %fp2
+; ST_FpP80m <fi#3>, 1, %reg0, 0, %reg0, killed %fp0; mem:ST10[FixedStack3](align=4)
+; ST_FpP80m <fi#4>, 1, %reg0, 0, %reg0, killed %fp1; mem:ST10[FixedStack4](align=4)
+; ST_FpP80m <fi#5>, 1, %reg0, 0, %reg0, killed %fp2; mem:ST10[FixedStack5](align=4)
+; FP_REG_KILL implicit-def %fp0, implicit-def %fp1, implicit-def %fp2, implicit-def %fp3, implicit-def %fp4, implicit-def %fp5, implicit-def %fp6
; JMP_4 <%bb.5>
; Successors according to CFG: %bb.5
diff --git a/test/CodeGen/X86/GlobalISel/add-scalar.ll b/test/CodeGen/X86/GlobalISel/add-scalar.ll
index 3ed6103d07b..0ef7c956d49 100644
--- a/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -28,8 +28,8 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_add_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: leal (%rsi,%rdi), %eax
; X64-NEXT: retq
;
@@ -45,10 +45,10 @@ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_add_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: leal (%rsi,%rdi), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
;
; X32-LABEL: test_add_i16:
diff --git a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
index ab9a2253a4e..6b93a2b9de2 100644
--- a/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
+++ b/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
@@ -6,7 +6,7 @@
define i64 @test_zext_i1(i8 %a) {
; X64-LABEL: test_zext_i1:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: andq $1, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/ext.ll b/test/CodeGen/X86/GlobalISel/ext.ll
index 3fb4979d2cf..51cee2b51d3 100644
--- a/test/CodeGen/X86/GlobalISel/ext.ll
+++ b/test/CodeGen/X86/GlobalISel/ext.ll
@@ -13,7 +13,7 @@ define i8 @test_zext_i1toi8(i32 %a) {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i8
@@ -31,7 +31,7 @@ define i16 @test_zext_i1toi16(i32 %a) {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andw $1, %ax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i16
diff --git a/test/CodeGen/X86/GlobalISel/gep.ll b/test/CodeGen/X86/GlobalISel/gep.ll
index fd30f8b782e..97a986e27d2 100644
--- a/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/test/CodeGen/X86/GlobalISel/gep.ll
@@ -13,7 +13,7 @@ define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
;
; X64-LABEL: test_gep_i8:
; X64: # %bb.0:
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: movsbq %sil, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
@@ -47,7 +47,7 @@ define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
;
; X64-LABEL: test_gep_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: movswq %si, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index 08a4636a7b1..f4d359a2065 100644
--- a/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
; the fallback path.
; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1, %0; mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1(s80), %0(p0); mem:ST10[%ptr](align=16) (in function: test_x86_fp80_dump)
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
define void @test_x86_fp80_dump(x86_fp80* %ptr){
diff --git a/test/CodeGen/X86/add-sub-nsw-nuw.ll b/test/CodeGen/X86/add-sub-nsw-nuw.ll
index 39dfe7b94b3..703860de944 100644
--- a/test/CodeGen/X86/add-sub-nsw-nuw.ll
+++ b/test/CodeGen/X86/add-sub-nsw-nuw.ll
@@ -10,7 +10,7 @@ define i8 @PR30841(i64 %argc) {
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: negl %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retl
entry:
%or = or i64 %argc, -4294967296
diff --git a/test/CodeGen/X86/add.ll b/test/CodeGen/X86/add.ll
index 5a8e6c37505..3511bae6a61 100644
--- a/test/CodeGen/X86/add.ll
+++ b/test/CodeGen/X86/add.ll
@@ -176,14 +176,14 @@ define i64 @test6(i64 %A, i32 %B) nounwind {
;
; X64-LINUX-LABEL: test6:
; X64-LINUX: # %bb.0: # %entry
-; X64-LINUX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-LINUX-NEXT: # kill: def %esi killed %esi def %rsi
; X64-LINUX-NEXT: shlq $32, %rsi
; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test6:
; X64-WIN32: # %bb.0: # %entry
-; X64-WIN32-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; X64-WIN32-NEXT: # kill: def %edx killed %edx def %rdx
; X64-WIN32-NEXT: shlq $32, %rdx
; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax
; X64-WIN32-NEXT: retq
diff --git a/test/CodeGen/X86/addcarry.ll b/test/CodeGen/X86/addcarry.ll
index 8ca9fb3120b..2d76c62bbd0 100644
--- a/test/CodeGen/X86/addcarry.ll
+++ b/test/CodeGen/X86/addcarry.ll
@@ -84,7 +84,7 @@ entry:
define i8 @e(i32* nocapture %a, i32 %b) nounwind {
; CHECK-LABEL: e:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
; CHECK-NEXT: movl (%rdi), %ecx
; CHECK-NEXT: leal (%rsi,%rcx), %edx
; CHECK-NEXT: addl %esi, %edx
diff --git a/test/CodeGen/X86/anyext.ll b/test/CodeGen/X86/anyext.ll
index 4baea69af9c..f0b514343b5 100644
--- a/test/CodeGen/X86/anyext.ll
+++ b/test/CodeGen/X86/anyext.ll
@@ -8,7 +8,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
; X32-LABEL: foo:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: andl $1, %eax
@@ -17,7 +17,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
; X64-LABEL: foo:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: andl $1, %eax
@@ -35,7 +35,7 @@ define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: divw {{[0-9]+}}(%esp)
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; X32-NEXT: # kill: def %ax killed %ax def %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: retl
;
@@ -44,7 +44,7 @@ define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: divw %si
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; X64-NEXT: # kill: def %ax killed %ax def %eax
; X64-NEXT: andl $1, %eax
; X64-NEXT: retq
%q = trunc i32 %p to i16
diff --git a/test/CodeGen/X86/atomic-eflags-reuse.ll b/test/CodeGen/X86/atomic-eflags-reuse.ll
index df4b00ddbe8..fc2b5671e82 100644
--- a/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -93,7 +93,7 @@ define i8 @test_add_1_setcc_slt(i64* %p) #0 {
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: shrq $63, %rax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %rax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %rax
; CHECK-NEXT: retq
entry:
%tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
diff --git a/test/CodeGen/X86/avx-cast.ll b/test/CodeGen/X86/avx-cast.ll
index 2f332592506..09bbb564ea8 100644
--- a/test/CodeGen/X86/avx-cast.ll
+++ b/test/CodeGen/X86/avx-cast.ll
@@ -9,7 +9,7 @@
define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castA:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -20,7 +20,7 @@ define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castB:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX-NEXT: retq
@@ -33,14 +33,14 @@ define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX1-LABEL: castC:
; AVX1: ## %bb.0:
-; AVX1-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX1-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: castC:
; AVX2: ## %bb.0:
-; AVX2-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: retq
@@ -54,7 +54,7 @@ define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castD:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -64,7 +64,7 @@ define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castE:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1>
@@ -74,7 +74,7 @@ define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castF:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1>
diff --git a/test/CodeGen/X86/avx-cmp.ll b/test/CodeGen/X86/avx-cmp.ll
index 637101a2c77..968d8e360ec 100644
--- a/test/CodeGen/X86/avx-cmp.ll
+++ b/test/CodeGen/X86/avx-cmp.ll
@@ -197,7 +197,7 @@ define i32 @scalarcmpA() uwtable ssp {
; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
; CHECK-NEXT: retq
%cmp29 = fcmp oeq double undef, 0.000000e+00
%res = zext i1 %cmp29 to i32
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 9658c8e5ae3..297922809ea 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -316,12 +316,12 @@ define <4 x i64> @test_mm256_castpd_si256(<4 x double> %a0) nounwind {
define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd128_pd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd128_pd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x double> %res
@@ -330,13 +330,13 @@ define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd256_pd128:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd256_pd128:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a0, <2 x i32> <i32 0, i32 1>
@@ -370,12 +370,12 @@ define <4 x i64> @test_mm256_castps_si256(<8 x float> %a0) nounwind {
define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps128_ps256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps128_ps256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %res
@@ -384,13 +384,13 @@ define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps256_ps128:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps256_ps128:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -400,12 +400,12 @@ define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi128_si256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi128_si256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x i64> %res
@@ -438,13 +438,13 @@ define <8 x float> @test_mm256_castsi256_ps(<4 x i64> %a0) nounwind {
define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_si128:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_si128:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> %a0, <2 x i32> <i32 0, i32 1>
@@ -1043,13 +1043,13 @@ define <4 x i64> @test_mm256_insert_epi64(<4 x i64> %a0, i64 %a1) nounwind {
define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_pd:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_pd:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X64-NEXT: retq
%ext = shufflevector <2 x double> %a1, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -1075,13 +1075,13 @@ define <8 x float> @test_mm256_insertf128_ps(<8 x float> %a0, <4 x float> %a1) n
define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_si256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X32-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_si256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X64-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; X64-NEXT: retq
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -2188,13 +2188,13 @@ define <4 x i64> @test_mm256_set_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) noun
define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a1, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2204,13 +2204,13 @@ define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwi
define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128d:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128d:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
@@ -2223,13 +2223,13 @@ define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) no
define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128i:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128i:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
@@ -2825,13 +2825,13 @@ define <4 x i64> @test_mm256_setr_epi64x(i64 %a0, i64 %a1, i64 %a2, i64 %a3) nou
define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2841,13 +2841,13 @@ define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounw
define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128d:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128d:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
@@ -2860,13 +2860,13 @@ define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) n
define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128i:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128i:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
diff --git a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index e9b568316f6..70358cdaf9e 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -39,7 +39,7 @@ define <8 x i32> @test_x86_avx_vinsertf128_si_256_1(<8 x i32> %a0, <4 x i32> %a1
define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
@@ -88,7 +88,7 @@ declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind read
define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
diff --git a/test/CodeGen/X86/avx-vinsertf128.ll b/test/CodeGen/X86/avx-vinsertf128.ll
index 6ae43d93e64..13b47c3d650 100644
--- a/test/CodeGen/X86/avx-vinsertf128.ll
+++ b/test/CodeGen/X86/avx-vinsertf128.ll
@@ -75,7 +75,7 @@ define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: insert_undef_pd:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0)
@@ -86,7 +86,7 @@ declare <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double>, <2 x double>
define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: insert_undef_ps:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0)
@@ -97,7 +97,7 @@ declare <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float>, <4 x float>, i
define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: insert_undef_si:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0)
diff --git a/test/CodeGen/X86/avx-vzeroupper.ll b/test/CodeGen/X86/avx-vzeroupper.ll
index 4b077221c14..e69a2905b0b 100644
--- a/test/CodeGen/X86/avx-vzeroupper.ll
+++ b/test/CodeGen/X86/avx-vzeroupper.ll
@@ -82,14 +82,14 @@ define <4 x float> @test02(<8 x float> %a, <8 x float> %b) nounwind {
; VZ-LABEL: test02:
; VZ: # %bb.0:
; VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; VZ-NEXT: vzeroupper
; VZ-NEXT: jmp do_sse # TAILCALL
;
; NO-VZ-LABEL: test02:
; NO-VZ: # %bb.0:
; NO-VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; NO-VZ-NEXT: jmp do_sse # TAILCALL
%add.i = fadd <8 x float> %a, %b
%add.low = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %add.i, i8 0)
@@ -222,10 +222,10 @@ define <4 x float> @test04(<4 x float> %a, <4 x float> %b) nounwind {
; VZ-LABEL: test04:
; VZ: # %bb.0:
; VZ-NEXT: pushq %rax
-; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; VZ-NEXT: callq do_avx
-; VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; VZ-NEXT: popq %rax
; VZ-NEXT: vzeroupper
; VZ-NEXT: retq
@@ -233,10 +233,10 @@ define <4 x float> @test04(<4 x float> %a, <4 x float> %b) nounwind {
; NO-VZ-LABEL: test04:
; NO-VZ: # %bb.0:
; NO-VZ-NEXT: pushq %rax
-; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; NO-VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; NO-VZ-NEXT: callq do_avx
-; NO-VZ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; NO-VZ-NEXT: popq %rax
; NO-VZ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll
index cafb3e69558..1fee5ed56cb 100644
--- a/test/CodeGen/X86/avx2-conversions.ll
+++ b/test/CodeGen/X86/avx2-conversions.ll
@@ -7,7 +7,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X32: # %bb.0:
; X32-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -15,7 +15,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) nounwind {
; X64: # %bb.0:
; X64-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
@@ -27,7 +27,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -35,7 +35,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) nounwind {
; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <8 x i32> %A to <8 x i16>
diff --git a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
index 1114b56268f..e0baf8408d0 100644
--- a/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
@@ -355,7 +355,7 @@ define <4 x double> @test_mm256_broadcastsd_pd(<4 x double> %a0) {
define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastsi128_si256:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1447,7 +1447,7 @@ define <4 x float> @test_mm256_mask_i64gather_ps(<4 x float> %a0, float *%a1, <4
define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test0_mm256_inserti128_si256:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/avx2-shift.ll b/test/CodeGen/X86/avx2-shift.ll
index 7b1a3978bcb..022c9f458db 100644
--- a/test/CodeGen/X86/avx2-shift.ll
+++ b/test/CodeGen/X86/avx2-shift.ll
@@ -532,7 +532,7 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -543,7 +543,7 @@ define <8 x i16> @variable_shl16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shl <8 x i16> %lhs, %rhs
@@ -582,7 +582,7 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -593,7 +593,7 @@ define <8 x i16> @variable_lshr16(<8 x i16> %lhs, <8 x i16> %rhs) {
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll
index 8f0f3777fc8..289a3af3f08 100644
--- a/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -409,7 +409,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -420,7 +420,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%shl = shl <8 x i16> %r, %a
@@ -617,7 +617,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -628,7 +628,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r, <8 x i16> %a) nounwind {
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
diff --git a/test/CodeGen/X86/avx512-arith.ll b/test/CodeGen/X86/avx512-arith.ll
index 6862280fa9d..766238f3280 100644
--- a/test/CodeGen/X86/avx512-arith.ll
+++ b/test/CodeGen/X86/avx512-arith.ll
@@ -176,10 +176,10 @@ define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
;
; AVX512DQ-LABEL: imulq256:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
@@ -229,10 +229,10 @@ define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
;
; AVX512DQ-LABEL: imulq128:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -717,7 +717,7 @@ define <16 x float> @test_mask_vminps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -732,7 +732,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
;
; AVX512BW-LABEL: test_mask_vminpd:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; AVX512BW-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -740,7 +740,7 @@ define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
;
; AVX512DQ-LABEL: test_mask_vminpd:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -780,7 +780,7 @@ define <16 x float> @test_mask_vmaxps(<16 x float> %dst, <16 x float> %i,
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
; AVX512F-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512F-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -795,7 +795,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
;
; AVX512BW-LABEL: test_mask_vmaxpd:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; AVX512BW-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
; AVX512BW-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512BW-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512BW-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
@@ -803,7 +803,7 @@ define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
+; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
; AVX512DQ-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX512DQ-NEXT: vpcmpneqd %zmm4, %zmm3, %k1
; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
diff --git a/test/CodeGen/X86/avx512-build-vector.ll b/test/CodeGen/X86/avx512-build-vector.ll
index 9751a84b927..c7664b61a33 100644
--- a/test/CodeGen/X86/avx512-build-vector.ll
+++ b/test/CodeGen/X86/avx512-build-vector.ll
@@ -14,7 +14,7 @@ define <16 x i32> @test2(<16 x i32> %x) {
define <16 x float> @test3(<4 x float> %a) {
; CHECK-LABEL: test3:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
diff --git a/test/CodeGen/X86/avx512-calling-conv.ll b/test/CodeGen/X86/avx512-calling-conv.ll
index 32b243d2eba..6e6d61f37d2 100644
--- a/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/test/CodeGen/X86/avx512-calling-conv.ll
@@ -65,7 +65,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1 {%k1}
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: test3:
@@ -88,7 +88,7 @@ define <8 x i1> @test3(<8 x i1>%a, <8 x i1>%b) {
; KNL_X32-NEXT: vptestmq %zmm1, %zmm1, %k1 {%k1}
; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_X32-NEXT: retl
%c = and <8 x i1>%a, %b
ret <8 x i1> %c
@@ -126,7 +126,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
@@ -154,7 +154,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8 x i32>%b) {
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL_X32-NEXT: vpslld $31, %ymm0, %ymm0
@@ -266,7 +266,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -275,7 +275,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: popq %rax
; KNL-NEXT: retq
;
@@ -302,7 +302,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vpmovsxwq %xmm0, %zmm0
; KNL_X32-NEXT: vpsllq $63, %zmm0, %zmm0
@@ -311,7 +311,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
; KNL_X32-NEXT: vptestmq %zmm0, %zmm0, %k1 {%k1}
; KNL_X32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_X32-NEXT: addl $12, %esp
; KNL_X32-NEXT: retl
%cmpRes = icmp sgt <8 x i32>%a, %b
diff --git a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
index fd4c5d0cbd9..29b9afecbe5 100644
--- a/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
+++ b/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
@@ -19,7 +19,7 @@ define zeroext i16 @cmp_kor_seq_16(<16 x float> %a, <16 x float> %b, <16 x float
; CHECK-NEXT: korw %k2, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
entry:
%0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %x, i32 13, i16 -1, i32 4)
diff --git a/test/CodeGen/X86/avx512-cvt.ll b/test/CodeGen/X86/avx512-cvt.ll
index 54342a10b95..e88ec9d7b15 100644
--- a/test/CodeGen/X86/avx512-cvt.ll
+++ b/test/CodeGen/X86/avx512-cvt.ll
@@ -80,9 +80,9 @@ define <4 x double> @slto4f64(<4 x i64> %a) {
;
; AVX512DQ-LABEL: slto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x double>
ret <4 x double> %b
@@ -105,9 +105,9 @@ define <2 x double> @slto2f64(<2 x i64> %a) {
;
; AVX512DQ-LABEL: slto2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x double>
@@ -133,9 +133,9 @@ define <2 x float> @sltof2f32(<2 x i64> %a) {
;
; AVX512DQ-LABEL: sltof2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x float>
@@ -170,7 +170,7 @@ define <4 x float> @slto4f32_mem(<4 x i64>* %a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%a1 = load <4 x i64>, <4 x i64>* %a, align 8
@@ -204,9 +204,9 @@ define <4 x i64> @f64to4sl(<4 x double> %a) {
;
; AVX512DQ-LABEL: f64to4sl:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
%b = fptosi <4 x double> %a to <4 x i64>
ret <4 x i64> %b
@@ -238,9 +238,9 @@ define <4 x i64> @f32to4sl(<4 x float> %a) {
;
; AVX512DQ-LABEL: f32to4sl:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
%b = fptosi <4 x float> %a to <4 x i64>
ret <4 x i64> %b
@@ -272,9 +272,9 @@ define <4 x float> @slto4f32(<4 x i64> %a) {
;
; AVX512DQ-LABEL: slto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x float>
@@ -307,9 +307,9 @@ define <4 x float> @ulto4f32(<4 x i64> %a) {
;
; AVX512DQ-LABEL: ulto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = uitofp <4 x i64> %a to <4 x float>
@@ -484,9 +484,9 @@ define <16 x i16> @f32to16us(<16 x float> %f) {
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; NOVL-LABEL: f32to8ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to8ui:
@@ -500,9 +500,9 @@ define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-LABEL: f32to4ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -528,7 +528,7 @@ define <8 x i16> @f64to8us(<8 x double> %f) {
; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -547,7 +547,7 @@ define <8 x i8> @f64to8uc(<8 x double> %f) {
; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -564,9 +564,9 @@ define <8 x i8> @f64to8uc(<8 x double> %f) {
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-LABEL: f64to4ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -1287,9 +1287,9 @@ define <8 x double> @uito8f64_maskz(<8 x i32> %a, i8 %b) nounwind {
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f64:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f64:
@@ -1321,9 +1321,9 @@ define <8 x double> @uito8f64(<8 x i32> %a) {
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; NOVL-LABEL: uito8f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: uito8f32:
@@ -1337,9 +1337,9 @@ define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -1553,7 +1553,7 @@ define <8 x double> @sbto8f64(<8 x double> %a) {
define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-LABEL: sbto8f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1579,7 +1579,7 @@ define <8 x float> @sbto8f32(<8 x float> %a) {
;
; AVX512DQ-LABEL: sbto8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1885,7 +1885,7 @@ define <16 x double> @ubto16f64(<16 x i32> %a) {
define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVL-LABEL: ubto8f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -1907,7 +1907,7 @@ define <8 x float> @ubto8f32(<8 x i32> %a) {
define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVL-LABEL: ubto8f64:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NOVL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVL-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
diff --git a/test/CodeGen/X86/avx512-ext.ll b/test/CodeGen/X86/avx512-ext.ll
index ab7eff399f0..97beff63811 100644
--- a/test/CodeGen/X86/avx512-ext.ll
+++ b/test/CodeGen/X86/avx512-ext.ll
@@ -348,7 +348,7 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: vpmovzxbd {{.*#+}} ymm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x32:
@@ -372,7 +372,7 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8 x i8> *%i , <8 x i1> %mask) nounwind re
; KNL-NEXT: vpmovsxbd (%rdi), %ymm1
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x32:
@@ -705,7 +705,7 @@ define <8 x i32> @zext_8x16mem_to_8x32(<8 x i16> *%i , <8 x i1> %mask) nounwind
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x32:
@@ -729,7 +729,7 @@ define <8 x i32> @sext_8x16mem_to_8x32mask(<8 x i16> *%i , <8 x i1> %mask) nounw
; KNL-NEXT: vpmovsxwd (%rdi), %ymm1
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
@@ -763,7 +763,7 @@ define <8 x i32> @zext_8x16_to_8x32mask(<8 x i16> %a , <8 x i1> %mask) nounwind
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vpxor %xmm0, %xmm0, %xmm0
; KNL-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x32mask:
@@ -1328,7 +1328,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: # kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -1336,7 +1336,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -1349,7 +1349,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: # kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i32_to_16i1:
@@ -1357,7 +1357,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -1396,7 +1396,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: # kill: def %al killed %al killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -1404,7 +1404,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -1442,7 +1442,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: # kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -1455,7 +1455,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; SKX-NEXT: kmovw %edi, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
@@ -1468,7 +1468,7 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32> %a1, <8 x i32> %a2) nounwind {
; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i16:
diff --git a/test/CodeGen/X86/avx512-extract-subvector.ll b/test/CodeGen/X86/avx512-extract-subvector.ll
index 6eedb5a5e9d..d0b6369556e 100644
--- a/test/CodeGen/X86/avx512-extract-subvector.ll
+++ b/test/CodeGen/X86/avx512-extract-subvector.ll
@@ -15,7 +15,7 @@ define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16_first_element:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -35,7 +35,7 @@ define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8_first_element:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
diff --git a/test/CodeGen/X86/avx512-hadd-hsub.ll b/test/CodeGen/X86/avx512-hadd-hsub.ll
index 255ac8a81f3..d5bd7622a18 100644
--- a/test/CodeGen/X86/avx512-hadd-hsub.ll
+++ b/test/CodeGen/X86/avx512-hadd-hsub.ll
@@ -63,7 +63,7 @@ define float @fhadd_16(<16 x float> %x225) {
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16:
@@ -72,7 +72,7 @@ define float @fhadd_16(<16 x float> %x225) {
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -90,7 +90,7 @@ define float @fhsub_16(<16 x float> %x225) {
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhsub_16:
@@ -99,7 +99,7 @@ define float @fhsub_16(<16 x float> %x225) {
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -181,7 +181,7 @@ define <4 x double> @fadd_noundef_low(<8 x double> %x225, <8 x double> %x227) {
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_low:
@@ -189,7 +189,7 @@ define <4 x double> @fadd_noundef_low(<8 x double> %x225, <8 x double> %x227) {
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
%x228 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5 ,i32 13, i32 7, i32 15>
@@ -228,7 +228,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32> %x225, <16 x i32> %x227) {
; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3_sv:
@@ -236,7 +236,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32> %x225, <16 x i32> %x227) {
; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; SKX-NEXT: retq
%x226 = shufflevector <16 x i32> %x225, <16 x i32> %x227, <16 x i32> <i32 0, i32 2, i32 16, i32 18
, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30>
@@ -255,7 +255,7 @@ define double @fadd_noundef_eel(<8 x double> %x225, <8 x double> %x227) {
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_eel:
@@ -263,7 +263,7 @@ define double @fadd_noundef_eel(<8 x double> %x225, <8 x double> %x227) {
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
diff --git a/test/CodeGen/X86/avx512-insert-extract.ll b/test/CodeGen/X86/avx512-insert-extract.ll
index 9d12697acf1..39d93e70dd1 100644
--- a/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/test/CodeGen/X86/avx512-insert-extract.ll
@@ -85,7 +85,7 @@ define float @test7(<16 x float> %x, i32 %ind) nounwind {
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -104,7 +104,7 @@ define double @test8(<8 x double> %x, i32 %ind) nounwind {
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -123,7 +123,7 @@ define float @test9(<8 x float> %x, i32 %ind) nounwind {
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -142,7 +142,7 @@ define i32 @test10(<16 x i32> %x, i32 %ind) nounwind {
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -237,7 +237,7 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
@@ -252,7 +252,7 @@ define i16 @test13(i32 %a, i32 %b) {
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
%cmp_res = icmp ult i32 %a, %b
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
@@ -318,7 +318,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
; KNL-NEXT: vpslld $31, %zmm2, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -332,7 +332,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
; SKX-NEXT: vpermi2d %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovd2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
@@ -355,7 +355,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -369,7 +369,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
; SKX-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; SKX-NEXT: vpmovq2m %zmm2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
@@ -465,7 +465,7 @@ define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i16> %x, i32 1
@@ -480,7 +480,7 @@ define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <16 x i16> %x, i32 1
@@ -494,7 +494,7 @@ define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vpextrw $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%r1 = extractelement <8 x i16> %x, i32 1
%r2 = extractelement <8 x i16> %x, i32 3
@@ -508,7 +508,7 @@ define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <64 x i8> %x, i32 1
@@ -523,7 +523,7 @@ define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i8> %x, i32 1
@@ -537,7 +537,7 @@ define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vpextrb $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%r1 = extractelement <16 x i8> %x, i32 1
%r2 = extractelement <16 x i8> %x, i32 3
@@ -1013,7 +1013,7 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -1029,7 +1029,7 @@ define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y)
; SKX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3]
; SKX-NEXT: vpmovd2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <4 x i32> %x, %y
@@ -1058,7 +1058,7 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; KNL-NEXT: vpsllq $63, %zmm2, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -1073,7 +1073,7 @@ define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y)
; SKX-NEXT: kshiftrw $1, %k0, %k0
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <2 x i64> %x, %y
@@ -1268,7 +1268,7 @@ define zeroext i8 @extractelement_v64i1_alt(<64 x i8> %a, <64 x i8> %b) {
define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2i64:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movq -24(%rsp,%rdi,8), %rax
@@ -1287,7 +1287,7 @@ define i64 @test_extractelement_variable_v4i64(<4 x i64> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
@@ -1309,7 +1309,7 @@ define i64 @test_extractelement_variable_v8i64(<8 x i64> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
@@ -1324,7 +1324,7 @@ define i64 @test_extractelement_variable_v8i64(<8 x i64> %t1, i32 %index) {
define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2f64:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1343,7 +1343,7 @@ define double @test_extractelement_variable_v4f64(<4 x double> %t1, i32 %index)
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1365,7 +1365,7 @@ define double @test_extractelement_variable_v8f64(<8 x double> %t1, i32 %index)
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1380,7 +1380,7 @@ define double @test_extractelement_variable_v8f64(<8 x double> %t1, i32 %index)
define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4i32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movl -24(%rsp,%rdi,4), %eax
@@ -1399,7 +1399,7 @@ define i32 @test_extractelement_variable_v8i32(<8 x i32> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -1421,7 +1421,7 @@ define i32 @test_extractelement_variable_v16i32(<16 x i32> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -1436,7 +1436,7 @@ define i32 @test_extractelement_variable_v16i32(<16 x i32> %t1, i32 %index) {
define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4f32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1455,7 +1455,7 @@ define float @test_extractelement_variable_v8f32(<8 x float> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1477,7 +1477,7 @@ define float @test_extractelement_variable_v16f32(<16 x float> %t1, i32 %index)
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1492,7 +1492,7 @@ define float @test_extractelement_variable_v16f32(<16 x float> %t1, i32 %index)
define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -1511,7 +1511,7 @@ define i16 @test_extractelement_variable_v16i16(<16 x i16> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1533,7 +1533,7 @@ define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
@@ -1552,7 +1552,7 @@ define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1567,7 +1567,7 @@ define i16 @test_extractelement_variable_v32i16(<32 x i16> %t1, i32 %index) {
define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
@@ -1587,7 +1587,7 @@ define i8 @test_extractelement_variable_v32i8(<32 x i8> %t1, i32 %index) {
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $31, %edi
; CHECK-NEXT: movq %rsp, %rax
@@ -1611,7 +1611,7 @@ define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $63, %edi
@@ -1631,7 +1631,7 @@ define i8 @test_extractelement_variable_v64i8(<64 x i8> %t1, i32 %index) {
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movq %rsp, %rax
@@ -1695,7 +1695,7 @@ define i8 @test_extractelement_variable_v64i8_indexi8(<64 x i8> %t1, i8 %index)
define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
; KNL-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -1708,7 +1708,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
;
; SKX-LABEL: test_extractelement_varible_v2i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1725,7 +1725,7 @@ define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b,
define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
; KNL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; KNL-NEXT: vpxor %xmm2, %xmm1, %xmm1
; KNL-NEXT: vpxor %xmm2, %xmm0, %xmm0
@@ -1738,7 +1738,7 @@ define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b,
;
; SKX-LABEL: test_extractelement_varible_v4i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1762,9 +1762,9 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
-; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa64 %zmm0, (%rsp)
@@ -1785,7 +1785,7 @@ define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b,
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2q %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
@@ -1812,7 +1812,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vmovdqa32 %zmm0, (%rsp)
@@ -1833,7 +1833,7 @@ define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
@@ -1860,7 +1860,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -1884,7 +1884,7 @@ define zeroext i8 @test_extractelement_varible_v32i1(<32 x i8> %a, <32 x i8> %b,
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
diff --git a/test/CodeGen/X86/avx512-insert-extract_i1.ll b/test/CodeGen/X86/avx512-insert-extract_i1.ll
index f088626d2f1..e28e384ae99 100644
--- a/test/CodeGen/X86/avx512-insert-extract_i1.ll
+++ b/test/CodeGen/X86/avx512-insert-extract_i1.ll
@@ -13,7 +13,7 @@ define zeroext i8 @test_extractelement_varible_v64i1(<64 x i8> %a, <64 x i8> %b,
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vmovdqa32 %zmm0, (%rsp)
diff --git a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
index 95fe89672c6..9deb87c4de1 100644
--- a/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -9,7 +9,7 @@ define i16 @unpckbw_test(i16 %a0, i16 %a1) {
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shll $8, %esi
; CHECK-NEXT: orl %esi, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
ret i16 %res
@@ -558,7 +558,7 @@ define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -570,7 +570,7 @@ define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -583,7 +583,7 @@ define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -595,7 +595,7 @@ define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -608,7 +608,7 @@ define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -620,7 +620,7 @@ define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -633,7 +633,7 @@ define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -645,7 +645,7 @@ define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -3109,7 +3109,7 @@ declare <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float>, <4 x fl
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3130,7 +3130,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32>, <4 x i32>,
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3574,7 +3574,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3608,7 +3608,7 @@ declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3642,7 +3642,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3677,7 +3677,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3747,7 +3747,7 @@ define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) {
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
@@ -3765,7 +3765,7 @@ define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) {
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
%res1 = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 %m)
@@ -3785,7 +3785,7 @@ define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16-1)
@@ -3804,7 +3804,7 @@ define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8-1)
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index bf0f6e78817..35a502b2482 100644
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -40,7 +40,7 @@ define i16 @test_kand(i16 %a0, i16 %a1) {
; CHECK-NEXT: kandw %k0, %k1, %k0
; CHECK-NEXT: kandw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
@@ -58,7 +58,7 @@ define i16 @test_kandn(i16 %a0, i16 %a1) {
; CHECK-NEXT: kandnw %k2, %k1, %k1
; CHECK-NEXT: kandnw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kandn.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kandn.w(i16 %t1, i16 %a1)
@@ -72,7 +72,7 @@ define i16 @test_knot(i16 %a0) {
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
ret i16 %res
@@ -89,7 +89,7 @@ define i16 @test_kor(i16 %a0, i16 %a1) {
; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: korw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kor.w(i16 %t1, i16 %a1)
@@ -109,7 +109,7 @@ define i16 @test_kxnor(i16 %a0, i16 %a1) {
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxnor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxnor.w(i16 %t1, i16 %a1)
@@ -127,7 +127,7 @@ define i16 @test_kxor(i16 %a0, i16 %a1) {
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxor.w(i16 %t1, i16 %a1)
@@ -793,7 +793,7 @@ declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
ret i16 %res
@@ -805,7 +805,7 @@ declare <8 x double> @llvm.x86.avx512.vbroadcast.sd.512(i8*) nounwind readonly
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
ret i8 %res
@@ -3294,7 +3294,7 @@ define i8@test_int_x86_avx512_mask_cmp_sd(<2 x double> %x0, <2 x double> %x1, i8
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
@@ -3316,7 +3316,7 @@ define i8@test_int_x86_avx512_mask_cmp_sd_all(<2 x double> %x0, <2 x double> %x1
; CHECK-NEXT: orb %cl, %dl
; CHECK-NEXT: orb %sil, %al
; CHECK-NEXT: orb %dl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
@@ -3338,7 +3338,7 @@ define i8@test_int_x86_avx512_mask_cmp_ss(<4 x float> %x0, <4 x float> %x1, i8 %
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
@@ -3361,7 +3361,7 @@ define i8@test_int_x86_avx512_mask_cmp_ss_all(<4 x float> %x0, <4 x float> %x1,
; CHECK-NEXT: andb %cl, %dl
; CHECK-NEXT: andb %sil, %al
; CHECK-NEXT: andb %dl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
diff --git a/test/CodeGen/X86/avx512-mask-op.ll b/test/CodeGen/X86/avx512-mask-op.ll
index 6feeb74e67c..01446017671 100644
--- a/test/CodeGen/X86/avx512-mask-op.ll
+++ b/test/CodeGen/X86/avx512-mask-op.ll
@@ -11,7 +11,7 @@ define i16 @mask16(i16 %x) {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask16:
@@ -19,7 +19,7 @@ define i16 @mask16(i16 %x) {
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16:
@@ -27,7 +27,7 @@ define i16 @mask16(i16 %x) {
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16:
@@ -35,7 +35,7 @@ define i16 @mask16(i16 %x) {
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -84,7 +84,7 @@ define i8 @mask8(i8 %x) {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
@@ -92,7 +92,7 @@ define i8 @mask8(i8 %x) {
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8:
@@ -100,7 +100,7 @@ define i8 @mask8(i8 %x) {
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8:
@@ -108,7 +108,7 @@ define i8 @mask8(i8 %x) {
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -235,7 +235,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: korw %k0, %k2, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: mand16_mem:
@@ -246,7 +246,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: korw %k0, %k2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mand16_mem:
@@ -257,7 +257,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k2, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mand16_mem:
@@ -268,7 +268,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: korw %k0, %k2, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512DQ-NEXT: retq
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -285,7 +285,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
@@ -293,7 +293,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: shuf_test1:
@@ -301,7 +301,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuf_test1:
@@ -309,7 +309,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
; AVX512DQ-NEXT: retq
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -371,7 +371,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -382,7 +382,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -393,7 +393,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -404,7 +404,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -421,7 +421,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; KNL-NEXT: kshiftrw $15, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -432,7 +432,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftrw $15, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andb $1, %al
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -443,7 +443,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andb $1, %al
-; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -454,7 +454,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; AVX512DQ-NEXT: kshiftrw $15, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andb $1, %al
-; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -704,7 +704,7 @@ define <16 x i8> @test8(<16 x i32>%a, <16 x i32>%b, i32 %a1, i32 %b1) {
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: LBB17_3:
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -774,7 +774,7 @@ define <16 x i1> @test9(<16 x i1>%a, <16 x i1>%b, i32 %a1, i32 %b1) {
; AVX512BW-NEXT: LBB18_3:
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -907,7 +907,7 @@ define <16 x i1> @test15(i32 %x, i32 %y) {
; AVX512BW-NEXT: cmovgw %ax, %cx
; AVX512BW-NEXT: kmovd %ecx, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1177,7 +1177,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-NEXT: korw %k0, %k1, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -1221,7 +1221,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1244,7 +1244,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512DQ-NEXT: korb %k0, %k1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -1302,7 +1302,7 @@ define <32 x i16> @test21(<32 x i16> %x , <32 x i1> %mask) nounwind readnone {
define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
; KNL-LABEL: test22:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1319,7 +1319,7 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
;
; AVX512BW-LABEL: test22:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512BW-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -1329,7 +1329,7 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
;
; AVX512DQ-LABEL: test22:
; AVX512DQ: ## %bb.0:
-; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vpslld $31, %ymm0, %ymm0
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rdi)
@@ -1342,7 +1342,7 @@ define void @test22(<4 x i1> %a, <4 x i1>* %addr) {
define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
; KNL-LABEL: test23:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1359,7 +1359,7 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
;
; AVX512BW-LABEL: test23:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
@@ -1369,7 +1369,7 @@ define void @test23(<2 x i1> %a, <2 x i1>* %addr) {
;
; AVX512DQ-LABEL: test23:
; AVX512DQ: ## %bb.0:
-; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512DQ-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: kmovb %k0, (%rdi)
@@ -2538,7 +2538,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2553,7 +2553,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2561,7 +2561,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) {
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <2 x i1>, <2 x i1>* %a
@@ -2575,7 +2575,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2590,7 +2590,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2598,7 +2598,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) {
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <4 x i1>, <4 x i1>* %a
@@ -3624,7 +3624,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_add:
@@ -3633,7 +3633,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_add:
@@ -3642,7 +3642,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_add:
@@ -3651,7 +3651,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -3667,7 +3667,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_sub:
@@ -3676,7 +3676,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_sub:
@@ -3685,7 +3685,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_sub:
@@ -3694,7 +3694,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -3710,7 +3710,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_mul:
@@ -3719,7 +3719,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_mul:
@@ -3728,7 +3728,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_mul:
@@ -3737,7 +3737,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -3753,7 +3753,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_add:
@@ -3762,7 +3762,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_add:
@@ -3771,7 +3771,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_add:
@@ -3780,7 +3780,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -3796,7 +3796,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_sub:
@@ -3805,7 +3805,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_sub:
@@ -3814,7 +3814,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_sub:
@@ -3823,7 +3823,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -3839,7 +3839,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %al killed %al killed %eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_mul:
@@ -3848,7 +3848,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %al killed %al killed %eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_mul:
@@ -3857,7 +3857,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_mul:
@@ -3866,7 +3866,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
diff --git a/test/CodeGen/X86/avx512-memfold.ll b/test/CodeGen/X86/avx512-memfold.ll
index 80941181995..02c51316f2e 100644
--- a/test/CodeGen/X86/avx512-memfold.ll
+++ b/test/CodeGen/X86/avx512-memfold.ll
@@ -7,7 +7,7 @@ define i8 @test_int_x86_avx512_mask_cmp_ss(<4 x float> %a, float* %b, i8 %mask)
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
diff --git a/test/CodeGen/X86/avx512-regcall-Mask.ll b/test/CodeGen/X86/avx512-regcall-Mask.ll
index d02d6a69f69..3bd69ef77fa 100644
--- a/test/CodeGen/X86/avx512-regcall-Mask.ll
+++ b/test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -310,9 +310,9 @@ define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1>
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
-; X32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<kill>
-; X32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<kill>
+; X32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; X32-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
+; X32-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
; X32-NEXT: calll _test_argv32i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm5 # 16-byte Reload
@@ -340,9 +340,9 @@ define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1>
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
-; WIN64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<kill>
-; WIN64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<kill>
+; WIN64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; WIN64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
+; WIN64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
; WIN64-NEXT: callq test_argv32i1helper
; WIN64-NEXT: nop
; WIN64-NEXT: addq $32, %rsp
@@ -384,9 +384,9 @@ define x86_regcallcc i32 @test_argv32i1(<32 x i1> %x0, <32 x i1> %x1, <32 x i1>
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
-; LINUXOSX64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<kill>
-; LINUXOSX64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<kill>
+; LINUXOSX64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; LINUXOSX64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
+; LINUXOSX64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
; LINUXOSX64-NEXT: callq test_argv32i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
@@ -538,9 +538,9 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1>
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
-; X32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
+; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv16i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -568,9 +568,9 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1>
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
-; WIN64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
-; WIN64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
+; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
+; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv16i1helper
; WIN64-NEXT: nop
@@ -612,9 +612,9 @@ define x86_regcallcc i16 @test_argv16i1(<16 x i1> %x0, <16 x i1> %x1, <16 x i1>
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
-; LINUXOSX64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
-; LINUXOSX64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
+; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
+; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv16i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -705,9 +705,9 @@ define i16 @caller_retv16i1() #0 {
; X32-LABEL: caller_retv16i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv16i1
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; X32-NEXT: # kill: def %ax killed %ax def %eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv16i1:
@@ -724,9 +724,9 @@ define i16 @caller_retv16i1() #0 {
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv16i1
-; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; WIN64-NEXT: # kill: def %ax killed %ax def %eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -742,9 +742,9 @@ define i16 @caller_retv16i1() #0 {
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv16i1
-; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
; LINUXOSX64-NEXT: popq %rcx
; LINUXOSX64-NEXT: retq
entry:
@@ -771,9 +771,9 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
; X32-NEXT: vpmovm2w %k2, %zmm0
; X32-NEXT: vpmovm2w %k1, %zmm1
; X32-NEXT: vpmovm2w %k0, %zmm2
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
-; X32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
-; X32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
+; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv8i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -801,9 +801,9 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
; WIN64-NEXT: vpmovm2w %k2, %zmm0
; WIN64-NEXT: vpmovm2w %k1, %zmm1
; WIN64-NEXT: vpmovm2w %k0, %zmm2
-; WIN64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
-; WIN64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
-; WIN64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
+; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
+; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv8i1helper
; WIN64-NEXT: nop
@@ -845,9 +845,9 @@ define x86_regcallcc i8 @test_argv8i1(<8 x i1> %x0, <8 x i1> %x1, <8 x i1> %x2)
; LINUXOSX64-NEXT: vpmovm2w %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2w %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
-; LINUXOSX64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<kill>
-; LINUXOSX64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<kill>
+; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
+; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv8i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -938,10 +938,10 @@ define <8 x i1> @caller_retv8i1() #0 {
; X32-LABEL: caller_retv8i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv8i1
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<def>
+; X32-NEXT: # kill: def %al killed %al def %eax
; X32-NEXT: kmovd %eax, %k0
; X32-NEXT: vpmovm2w %k0, %zmm0
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -959,10 +959,10 @@ define <8 x i1> @caller_retv8i1() #0 {
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv8i1
-; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<def>
+; WIN64-NEXT: # kill: def %al killed %al def %eax
; WIN64-NEXT: kmovd %eax, %k0
; WIN64-NEXT: vpmovm2w %k0, %zmm0
-; WIN64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -979,10 +979,10 @@ define <8 x i1> @caller_retv8i1() #0 {
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv8i1
-; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<def>
+; LINUXOSX64-NEXT: # kill: def %al killed %al def %eax
; LINUXOSX64-NEXT: kmovd %eax, %k0
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0
-; LINUXOSX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; LINUXOSX64-NEXT: popq %rax
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: retq
diff --git a/test/CodeGen/X86/avx512-regcall-NoMask.ll b/test/CodeGen/X86/avx512-regcall-NoMask.ll
index b8d6be37d92..9096720f172 100644
--- a/test/CodeGen/X86/avx512-regcall-NoMask.ll
+++ b/test/CodeGen/X86/avx512-regcall-NoMask.ll
@@ -8,19 +8,19 @@ define x86_regcallcc i1 @test_argReti1(i1 %a) {
; X32-LABEL: test_argReti1:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; WIN64-NEXT: # kill: def %al killed %al killed %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
; LINUXOSX64-NEXT: retq
%add = add i1 %a, 1
ret i1 %add
@@ -75,19 +75,19 @@ define x86_regcallcc i8 @test_argReti8(i8 %a) {
; X32-LABEL: test_argReti8:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; WIN64-NEXT: # kill: def %al killed %al killed %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
; LINUXOSX64-NEXT: retq
%add = add i8 %a, 1
ret i8 %add
@@ -142,19 +142,19 @@ define x86_regcallcc i16 @test_argReti16(i16 %a) {
; X32-LABEL: test_argReti16:
; X32: # %bb.0:
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
; LINUXOSX64-NEXT: retq
%add = add i16 %a, 1
ret i16 %add
@@ -167,9 +167,9 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; X32-NEXT: # kill: def %ax killed %ax def %eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
@@ -180,9 +180,9 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti16
-; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; WIN64-NEXT: # kill: def %ax killed %ax def %eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
@@ -196,9 +196,9 @@ define x86_regcallcc i16 @test_CallargReti16(i16 %a) {
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti16
-; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<def>
+; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: retq
%b = add i16 %a, 1
diff --git a/test/CodeGen/X86/avx512-schedule.ll b/test/CodeGen/X86/avx512-schedule.ll
index 589bf6c86a5..2f9f631c7e1 100755
--- a/test/CodeGen/X86/avx512-schedule.ll
+++ b/test/CodeGen/X86/avx512-schedule.ll
@@ -4335,7 +4335,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -4343,7 +4343,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8> %a) {
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -4356,7 +4356,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; GENERIC-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -4365,7 +4365,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i32> %a) {
; SKX-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -4405,7 +4405,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -4413,7 +4413,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %a) {
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -4450,7 +4450,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; GENERIC-NEXT: kmovw %edi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -4463,7 +4463,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; SKX-NEXT: kmovw %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
@@ -6740,7 +6740,7 @@ define i16 @mask16(i16 %x) {
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16:
@@ -6748,7 +6748,7 @@ define i16 @mask16(i16 %x) {
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -6783,7 +6783,7 @@ define i8 @mask8(i8 %x) {
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8:
@@ -6791,7 +6791,7 @@ define i8 @mask8(i8 %x) {
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -6900,7 +6900,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16_mem:
@@ -6911,7 +6911,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <16 x i1>* %y) {
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -6928,7 +6928,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kshiftrw $8, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuf_test1:
@@ -6936,7 +6936,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $8, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -6978,7 +6978,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-NEXT: kshiftrw $15, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andl $1, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6989,7 +6989,7 @@ define i16 @zext_test2(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andl $1, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -7006,7 +7006,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; GENERIC-NEXT: kshiftrw $15, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andb $1, %al # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: vzeroupper
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -7017,7 +7017,7 @@ define i8 @zext_test3(<16 x i32> %a, <16 x i32> %b) {
; SKX-NEXT: kshiftrw $15, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andb $1, %al # sched: [1:0.25]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -8133,7 +8133,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_add:
@@ -8142,7 +8142,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8158,7 +8158,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_sub:
@@ -8167,7 +8167,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8183,7 +8183,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_mul:
@@ -8192,7 +8192,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8208,7 +8208,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_add:
@@ -8217,7 +8217,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8233,7 +8233,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_sub:
@@ -8242,7 +8242,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8258,7 +8258,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %al killed %al killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_mul:
@@ -8267,7 +8267,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SKX-NEXT: # kill: def %al killed %al killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
diff --git a/test/CodeGen/X86/avx512-select.ll b/test/CodeGen/X86/avx512-select.ll
index c05601a263d..6491863d939 100644
--- a/test/CodeGen/X86/avx512-select.ll
+++ b/test/CodeGen/X86/avx512-select.ll
@@ -155,7 +155,7 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: korw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86-NEXT: # kill: def %al killed %al killed %eax
; X86-NEXT: retl
;
; X64-LABEL: select05_mem:
@@ -166,7 +166,7 @@ define i8 @select05_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: korw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -205,7 +205,7 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: kandw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86-NEXT: # kill: def %al killed %al killed %eax
; X86-NEXT: retl
;
; X64-LABEL: select06_mem:
@@ -216,7 +216,7 @@ define i8 @select06_mem(<8 x i1>* %a.0, <8 x i1>* %m) {
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: kandw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -237,7 +237,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
; X86-NEXT: kandw %k0, %k1, %k0
; X86-NEXT: korw %k2, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86-NEXT: # kill: def %al killed %al killed %eax
; X86-NEXT: retl
;
; X64-LABEL: select07:
@@ -249,7 +249,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8 %m) {
; X64-NEXT: kandw %k0, %k1, %k0
; X64-NEXT: korw %k2, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%mask = bitcast i8 %m to <8 x i1>
%a = bitcast i8 %a.0 to <8 x i1>
diff --git a/test/CodeGen/X86/avx512-shift.ll b/test/CodeGen/X86/avx512-shift.ll
index cbc601b71da..eb424a8d935 100644
--- a/test/CodeGen/X86/avx512-shift.ll
+++ b/test/CodeGen/X86/avx512-shift.ll
@@ -34,7 +34,7 @@ define <4 x i64> @shift_4_i64(<4 x i64> %a) {
; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: shift_4_i64:
@@ -106,10 +106,10 @@ define <8 x i64> @variable_sra2(<8 x i64> %x, <8 x i64> %y) {
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-LABEL: variable_sra3:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra3:
@@ -127,7 +127,7 @@ define <8 x i16> @variable_sra4(<8 x i16> %x, <8 x i16> %y) {
; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra4:
diff --git a/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 0f078194023..df88f0fca45 100644
--- a/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -839,7 +839,7 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp) {
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -967,7 +967,7 @@ define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp) {
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -1493,7 +1493,7 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec) {
; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7]
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6],ymm1[7]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 12>
@@ -1814,7 +1814,7 @@ define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp) {
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -3857,7 +3857,7 @@ define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp) {
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -4329,7 +4329,7 @@ define <2 x double> @test_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec) {
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%res = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> <i32 0, i32 6>
@@ -4727,7 +4727,7 @@ define <2 x double> @test_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp)
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2]
; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; CHECK-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
diff --git a/test/CodeGen/X86/avx512-trunc.ll b/test/CodeGen/X86/avx512-trunc.ll
index 54970083844..d40c899b495 100644
--- a/test/CodeGen/X86/avx512-trunc.ll
+++ b/test/CodeGen/X86/avx512-trunc.ll
@@ -57,9 +57,9 @@ define void @trunc_qb_512_mem(<8 x i64> %i, <8 x i8>* %res) #0 {
define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qb_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -75,7 +75,7 @@ define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
@@ -140,9 +140,9 @@ define void @trunc_qw_512_mem(<8 x i64> %i, <8 x i16>* %res) #0 {
define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -158,7 +158,7 @@ define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-LABEL: trunc_qw_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; KNL-NEXT: vmovq %xmm0, (%rdi)
@@ -223,9 +223,9 @@ define void @trunc_qd_512_mem(<8 x i64> %i, <8 x i32>* %res) #0 {
define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qd_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -241,7 +241,7 @@ define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-LABEL: trunc_qd_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
@@ -305,9 +305,9 @@ define void @trunc_db_512_mem(<16 x i32> %i, <16 x i8>* %res) #0 {
define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_db_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -323,7 +323,7 @@ define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-LABEL: trunc_db_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovq %xmm0, (%rdi)
@@ -387,9 +387,9 @@ define void @trunc_dw_512_mem(<16 x i32> %i, <16 x i16>* %res) #0 {
define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_dw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -405,7 +405,7 @@ define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-LABEL: trunc_dw_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/avx512-vbroadcast.ll b/test/CodeGen/X86/avx512-vbroadcast.ll
index 27c6fb88e01..09e7e646ca4 100644
--- a/test/CodeGen/X86/avx512-vbroadcast.ll
+++ b/test/CodeGen/X86/avx512-vbroadcast.ll
@@ -124,7 +124,7 @@ define <8 x double> @_inreg8xdouble(double %a) {
define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
+; ALL-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
; ALL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; ALL-NEXT: vpcmpneqd %zmm3, %zmm2, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
@@ -140,7 +140,7 @@ define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %m
define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; ALL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
@@ -166,7 +166,7 @@ define <8 x double> @_sd8xdouble_load(double* %a.ptr) {
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask_load:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; ALL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
; ALL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; ALL-NEXT: vpcmpneqd %zmm2, %zmm1, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
@@ -182,7 +182,7 @@ define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8
define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz_load:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; ALL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; ALL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; ALL-NEXT: vpcmpneqd %zmm1, %zmm0, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
diff --git a/test/CodeGen/X86/avx512-vec-cmp.ll b/test/CodeGen/X86/avx512-vec-cmp.ll
index 8bec292283b..9c25ba6c5b6 100644
--- a/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -111,11 +111,11 @@ define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; KNL-LABEL: test9:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; KNL-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test9:
@@ -131,11 +131,11 @@ define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; KNL-LABEL: test10:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test10:
@@ -166,7 +166,7 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -176,7 +176,7 @@ define i16 @test12(<16 x i64> %a, <16 x i64> %b) nounwind {
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckbw %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
@@ -875,7 +875,7 @@ define <8 x i32>@test28(<8 x i64> %x, <8 x i64> %y, <8 x i64> %x1, <8 x i64> %y1
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; KNL-NEXT: kxnorw %k1, %k0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test28:
@@ -1007,12 +1007,12 @@ define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) no
define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
; KNL-LABEL: test35:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vmovups (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test35:
@@ -1121,12 +1121,12 @@ define <16 x float> @test40(<16 x float> %x, <16 x float> %x1, float* %ptr) n
define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind {
; KNL-LABEL: test41:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vbroadcastss (%rdi), %ymm2
; KNL-NEXT: vcmpltps %zmm2, %zmm0, %k1
; KNL-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; KNL-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: test41:
diff --git a/test/CodeGen/X86/avx512-vec3-crash.ll b/test/CodeGen/X86/avx512-vec3-crash.ll
index a0b296caf39..562ac1fe369 100644
--- a/test/CodeGen/X86/avx512-vec3-crash.ll
+++ b/test/CodeGen/X86/avx512-vec3-crash.ll
@@ -20,9 +20,9 @@ define <3 x i8 > @foo(<3 x i8>%x, <3 x i8>%a, <3 x i8>%b) {
; CHECK-NEXT: vpextrb $0, %xmm0, %eax
; CHECK-NEXT: vpextrb $4, %xmm0, %edx
; CHECK-NEXT: vpextrb $8, %xmm0, %ecx
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; CHECK-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; CHECK-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def %dl killed %dl killed %edx
+; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
; CHECK-NEXT: retq
%cmp.i = icmp slt <3 x i8> %x, %a
%res = sext <3 x i1> %cmp.i to <3 x i8>
diff --git a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
index 28031820a8e..57271593bd0 100644
--- a/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -2039,7 +2039,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-NEXT: vpblendvb %ymm0, %ymm3, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
-; AVX512F-32-NEXT: # kill: %al<def> %al<kill> %eax<kill> %eax<def>
+; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
@@ -2408,7 +2408,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %mask) {
; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm4, %ymm0, %ymm0
; AVX512F-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: # kill: %bl<def> %bl<kill> %ebx<kill> %ebx<def>
+; AVX512F-32-NEXT: # kill: def %bl killed %bl killed %ebx def %ebx
; AVX512F-32-NEXT: shrb $7, %bl
; AVX512F-32-NEXT: kmovd %ebx, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
@@ -2923,7 +2923,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-NEXT: vpblendvb %ymm0, %ymm3, %ymm2, %ymm2
; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
-; AVX512F-32-NEXT: # kill: %al<def> %al<kill> %eax<kill> %eax<def>
+; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kmovd %eax, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
@@ -3292,7 +3292,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_512(<64 x i8> %a0, <64 x i8> %a1, i64 %m
; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm4, %ymm0, %ymm0
; AVX512F-32-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512F-32-NEXT: vpmovb2m %zmm0, %k0
-; AVX512F-32-NEXT: # kill: %bl<def> %bl<kill> %ebx<kill> %ebx<def>
+; AVX512F-32-NEXT: # kill: def %bl killed %bl killed %ebx def %ebx
; AVX512F-32-NEXT: shrb $7, %bl
; AVX512F-32-NEXT: kmovd %ebx, %k1
; AVX512F-32-NEXT: vpmovm2b %k1, %zmm0
diff --git a/test/CodeGen/X86/avx512bw-mov.ll b/test/CodeGen/X86/avx512bw-mov.ll
index 82a3f6310f5..e2c27910f09 100644
--- a/test/CodeGen/X86/avx512bw-mov.ll
+++ b/test/CodeGen/X86/avx512bw-mov.ll
@@ -105,7 +105,7 @@ define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x
; CHECK-NEXT: kshiftlq $48, %k0, %k0
; CHECK-NEXT: kshiftrq $48, %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
ret <16 x i8> %res
@@ -120,7 +120,7 @@ define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x
; CHECK-NEXT: kshiftlq $32, %k0, %k0
; CHECK-NEXT: kshiftrq $32, %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer)
ret <32 x i8> %res
@@ -135,7 +135,7 @@ define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i1
; CHECK-NEXT: kshiftld $24, %k0, %k0
; CHECK-NEXT: kshiftrd $24, %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
ret <8 x i16> %res
@@ -150,7 +150,7 @@ define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16
; CHECK-NEXT: kshiftld $16, %k0, %k0
; CHECK-NEXT: kshiftrd $16, %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
ret <16 x i16> %res
@@ -160,7 +160,7 @@ declare <16 x i16> @llvm.masked.load.v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftlq $48, %k0, %k0
@@ -175,7 +175,7 @@ declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftlq $32, %k0, %k0
@@ -190,7 +190,7 @@ declare void @llvm.masked.store.v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)
define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %zmm0, %k0
; CHECK-NEXT: kshiftld $24, %k0, %k0
@@ -205,7 +205,7 @@ declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kshiftld $16, %k0, %k0
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
index 06b97cf6a41..4e343eef6fa 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
@@ -503,7 +503,7 @@ define i16 @test_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
@@ -516,7 +516,7 @@ define i16 @test_mask_pcmpeq_w_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
@@ -555,7 +555,7 @@ define i16 @test_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
@@ -568,7 +568,7 @@ define i16 @test_mask_pcmpgt_w_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
@@ -582,7 +582,7 @@ define i16 @test_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -594,7 +594,7 @@ define i16 @test_mask_pcmpeq_b_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -607,7 +607,7 @@ define i8 @test_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -619,7 +619,7 @@ define i8 @test_mask_pcmpeq_w_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -632,7 +632,7 @@ define i16 @test_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -644,7 +644,7 @@ define i16 @test_mask_pcmpgt_b_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -657,7 +657,7 @@ define i8 @test_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -669,7 +669,7 @@ define i8 @test_mask_pcmpgt_w_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -3683,7 +3683,7 @@ define i16@test_int_x86_avx512_ptestm_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %x
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3721,7 +3721,7 @@ define i8@test_int_x86_avx512_ptestm_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3740,7 +3740,7 @@ define i16@test_int_x86_avx512_ptestm_w_256(<16 x i16> %x0, <16 x i16> %x1, i16
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3760,7 +3760,7 @@ define i16@test_int_x86_avx512_ptestnm_b_128(<16 x i8> %x0, <16 x i8> %x1, i16 %
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3798,7 +3798,7 @@ define i8@test_int_x86_avx512_ptestnm_w_128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3817,7 +3817,7 @@ define i16@test_int_x86_avx512_ptestnm_w_256(<16 x i16> %x0, <16 x i16> %x1, i16
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
diff --git a/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index 45b01da8378..64ad66e336b 100644
--- a/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -2311,7 +2311,7 @@ define i16@test_int_x86_avx512_cvtb2mask_128(<16 x i8> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %x0)
ret i16 %res
@@ -2336,7 +2336,7 @@ define i8@test_int_x86_avx512_cvtw2mask_128(<8 x i16> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %x0)
ret i8 %res
@@ -2349,7 +2349,7 @@ define i16@test_int_x86_avx512_cvtw2mask_256(<16 x i16> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16> %x0)
ret i16 %res
diff --git a/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll b/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
index 190d58d84b1..fba2b5f0793 100644
--- a/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
@@ -7,7 +7,7 @@ define zeroext i16 @TEST_mm_test_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) local
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -24,7 +24,7 @@ define zeroext i16 @TEST_mm_mask_test_epi8_mask(i16 zeroext %__U, <2 x i64> %__A
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -42,7 +42,7 @@ define zeroext i8 @TEST_mm_test_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) local
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -59,7 +59,7 @@ define zeroext i8 @TEST_mm_mask_test_epi16_mask(i8 zeroext %__U, <2 x i64> %__A,
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -77,7 +77,7 @@ define zeroext i16 @TEST_mm_testn_epi8_mask(<2 x i64> %__A, <2 x i64> %__B) loca
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -94,7 +94,7 @@ define zeroext i16 @TEST_mm_mask_testn_epi8_mask(i16 zeroext %__U, <2 x i64> %__
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -112,7 +112,7 @@ define zeroext i8 @TEST_mm_testn_epi16_mask(<2 x i64> %__A, <2 x i64> %__B) loca
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -129,7 +129,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi16_mask(i8 zeroext %__U, <2 x i64> %__A
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -182,7 +182,7 @@ define zeroext i16 @TEST_mm256_test_epi16_mask(<4 x i64> %__A, <4 x i64> %__B) l
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -200,7 +200,7 @@ define zeroext i16 @TEST_mm256_mask_test_epi16_mask(i16 zeroext %__U, <4 x i64>
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -254,7 +254,7 @@ define zeroext i16 @TEST_mm256_testn_epi16_mask(<4 x i64> %__A, <4 x i64> %__B)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -272,7 +272,7 @@ define zeroext i16 @TEST_mm256_mask_testn_epi16_mask(i16 zeroext %__U, <4 x i64>
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
index ba75a351130..60ee8f783c2 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
@@ -161,7 +161,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x8.512(<8 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -195,7 +195,7 @@ declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x2.512(<2 x double>, <8 x
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -230,7 +230,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x8.512(<8 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -264,7 +264,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x2.512(<2 x i64>, <8 x i64>,
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -299,7 +299,7 @@ declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x2.512(<4 x float>, <16 x
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -321,7 +321,7 @@ declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x2.512(<4 x i32>, <16 x i32
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
diff --git a/test/CodeGen/X86/avx512dq-intrinsics.ll b/test/CodeGen/X86/avx512dq-intrinsics.ll
index 56aee74faaa..e0e7b237447 100644
--- a/test/CodeGen/X86/avx512dq-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dq-intrinsics.ll
@@ -351,7 +351,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_pd_512(<8 x double> %x0, i8 %x1) {
; CHECK-NEXT: vfpclasspd $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 4, i8 -1)
@@ -369,7 +369,7 @@ define i16@test_int_x86_avx512_mask_fpclass_ps_512(<16 x float> %x0, i16 %x1) {
; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 %x1)
%res1 = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 -1)
@@ -388,7 +388,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_sd(<2 x double> %x0, i8 %x1) {
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -401,7 +401,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_sd_load(<2 x double>* %x0ptr) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclasssd $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%x0 = load <2 x double>, <2 x double>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -419,7 +419,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_ss(<4 x float> %x0, i8 %x1) {
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
@@ -432,7 +432,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_ss_load(<4 x float>* %x0ptr, i8 %x1)
; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclassss $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%x0 = load <4 x float>, <4 x float>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
@@ -446,7 +446,7 @@ define i16@test_int_x86_avx512_cvtd2mask_512(<16 x i32> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32> %x0)
ret i16 %res
@@ -459,7 +459,7 @@ define i8@test_int_x86_avx512_cvtq2mask_512(<8 x i64> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64> %x0)
ret i8 %res
diff --git a/test/CodeGen/X86/avx512dq-mask-op.ll b/test/CodeGen/X86/avx512dq-mask-op.ll
index 2a56532dd9d..8f7938f6a46 100644
--- a/test/CodeGen/X86/avx512dq-mask-op.ll
+++ b/test/CodeGen/X86/avx512dq-mask-op.ll
@@ -7,7 +7,7 @@ define i8 @mask8(i8 %x) {
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -57,7 +57,7 @@ define i8 @mand8_mem(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%ma = load <8 x i1>, <8 x i1>* %x
%mb = load <8 x i1>, <8 x i1>* %y
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
index 78fd5d57e40..3a1bce05e67 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
@@ -1673,7 +1673,7 @@ declare <4 x double> @llvm.x86.avx512.mask.broadcastf64x2.256(<2 x double>, <4 x
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xc8,0x01]
@@ -1708,7 +1708,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.broadcasti64x2.256(<2 x i64>, <4 x i64>,
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xc8,0x01]
@@ -1743,7 +1743,7 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcastf32x2.256(<4 x float>, <8 x f
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -1764,7 +1764,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x2.256(<4 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16]
; CHECK-NEXT: ## xmm2 = mem[0],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
diff --git a/test/CodeGen/X86/avx512dqvl-intrinsics.ll b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
index 71e8fb67568..f201599c4aa 100644
--- a/test/CodeGen/X86/avx512dqvl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512dqvl-intrinsics.ll
@@ -560,7 +560,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_ps_128(<4 x float> %x0, i8 %x1) {
; CHECK-NEXT: vfpclassps $4, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 4, i8 -1)
@@ -579,7 +579,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_ps_256(<8 x float> %x0, i8 %x1) {
; CHECK-NEXT: vfpclassps $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 4, i8 -1)
@@ -598,7 +598,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_pd_128(<2 x double> %x0, i8 %x1) {
; CHECK-NEXT: vfpclasspd $2, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 2, i8 -1)
@@ -617,7 +617,7 @@ define i8 @test_int_x86_avx512_mask_fpclass_pd_256(<4 x double> %x0, i8 %x1) {
; CHECK-NEXT: vfpclasspd $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 4, i8 -1)
@@ -632,7 +632,7 @@ define i8@test_int_x86_avx512_cvtd2mask_128(<4 x i32> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %x0)
ret i8 %res
@@ -645,7 +645,7 @@ define i8@test_int_x86_avx512_cvtd2mask_256(<8 x i32> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32> %x0)
ret i8 %res
@@ -658,7 +658,7 @@ define i8@test_int_x86_avx512_cvtq2mask_128(<2 x i64> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64> %x0)
ret i8 %res
@@ -671,7 +671,7 @@ define i8@test_int_x86_avx512_cvtq2mask_256(<4 x i64> %x0) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64> %x0)
ret i8 %res
diff --git a/test/CodeGen/X86/avx512f-vec-test-testn.ll b/test/CodeGen/X86/avx512f-vec-test-testn.ll
index 07dc9b8116b..731f5ffa2ca 100644
--- a/test/CodeGen/X86/avx512f-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512f-vec-test-testn.ll
@@ -7,7 +7,7 @@ define zeroext i8 @TEST_mm512_test_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) lo
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ define zeroext i16 @TEST_mm512_test_epi32_mask(<8 x i64> %__A, <8 x i64> %__B) l
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -41,7 +41,7 @@ define zeroext i8 @TEST_mm512_mask_test_epi64_mask(i8 %__U, <8 x i64> %__A, <8 x
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -60,7 +60,7 @@ define zeroext i16 @TEST_mm512_mask_test_epi32_mask(i16 %__U, <8 x i64> %__A, <8
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -79,7 +79,7 @@ define zeroext i8 @TEST_mm512_testn_epi64_mask(<8 x i64> %__A, <8 x i64> %__B) l
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -95,7 +95,7 @@ define zeroext i16 @TEST_mm512_testn_epi32_mask(<8 x i64> %__A, <8 x i64> %__B)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -113,7 +113,7 @@ define zeroext i8 @TEST_mm512_mask_testn_epi64_mask(i8 %__U, <8 x i64> %__A, <8
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -132,7 +132,7 @@ define zeroext i16 @TEST_mm512_mask_testn_epi32_mask(i16 %__U, <8 x i64> %__A, <
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 6d8e019a0ee..073cf46e9fb 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -1064,7 +1064,7 @@ define i8 @test_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1076,7 +1076,7 @@ define i8 @test_mask_pcmpeq_d_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1089,7 +1089,7 @@ define i8 @test_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1101,7 +1101,7 @@ define i8 @test_mask_pcmpeq_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1114,7 +1114,7 @@ define i8 @test_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1126,7 +1126,7 @@ define i8 @test_mask_pcmpgt_d_256(<8 x i32> %a, <8 x i32> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1139,7 +1139,7 @@ define i8 @test_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1151,7 +1151,7 @@ define i8 @test_mask_pcmpgt_q_256(<4 x i64> %a, <4 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1164,7 +1164,7 @@ define i8 @test_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1176,7 +1176,7 @@ define i8 @test_mask_pcmpeq_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1189,7 +1189,7 @@ define i8 @test_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1201,7 +1201,7 @@ define i8 @test_mask_pcmpeq_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -1214,7 +1214,7 @@ define i8 @test_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1226,7 +1226,7 @@ define i8 @test_mask_pcmpgt_d_128(<4 x i32> %a, <4 x i32> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1239,7 +1239,7 @@ define i8 @test_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1251,7 +1251,7 @@ define i8 @test_mask_pcmpgt_q_128(<2 x i64> %a, <2 x i64> %b, i8 %mask) {
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -5867,7 +5867,7 @@ declare <8 x float> @llvm.x86.avx512.mask.broadcastf32x4.256(<4 x float>, <8 x f
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0, <8 x float> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -5900,7 +5900,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.broadcasti32x4.256(<4 x i32>, <8 x i32>,
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xc8,0x01]
@@ -6003,7 +6003,7 @@ define i8@test_int_x86_avx512_ptestm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6022,7 +6022,7 @@ define i8@test_int_x86_avx512_ptestm_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6041,7 +6041,7 @@ define i8@test_int_x86_avx512_ptestm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6060,7 +6060,7 @@ define i8@test_int_x86_avx512_ptestm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
@@ -6079,7 +6079,7 @@ define i8@test_int_x86_avx512_ptestnm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6098,7 +6098,7 @@ define i8@test_int_x86_avx512_ptestnm_d_256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6117,7 +6117,7 @@ define i8@test_int_x86_avx512_ptestnm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6136,7 +6136,7 @@ define i8@test_int_x86_avx512_ptestnm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
diff --git a/test/CodeGen/X86/avx512vl-intrinsics.ll b/test/CodeGen/X86/avx512vl-intrinsics.ll
index 2cd118832d3..f635342218a 100644
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -718,7 +718,7 @@ define i8 @test_cmpps_256(<8 x float> %a, <8 x float> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.256(<8 x float> %a, <8 x float> %b, i32 2, i8 -1)
ret i8 %res
@@ -730,7 +730,7 @@ define i8 @test_cmpps_128(<4 x float> %a, <4 x float> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7c,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.ps.128(<4 x float> %a, <4 x float> %b, i32 2, i8 -1)
ret i8 %res
@@ -742,7 +742,7 @@ define i8 @test_cmppd_256(<4 x double> %a, <4 x double> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x28,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.256(<4 x double> %a, <4 x double> %b, i32 2, i8 -1)
ret i8 %res
@@ -754,7 +754,7 @@ define i8 @test_cmppd_128(<2 x double> %a, <2 x double> %b) {
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmplepd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0xfd,0x08,0xc2,0xc1,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.128(<2 x double> %a, <2 x double> %b, i32 2, i8 -1)
ret i8 %res
diff --git a/test/CodeGen/X86/avx512vl-vec-cmp.ll b/test/CodeGen/X86/avx512vl-vec-cmp.ll
index 036ab037b3b..8af9f73c842 100644
--- a/test/CodeGen/X86/avx512vl-vec-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-cmp.ll
@@ -45,12 +45,12 @@ define <8 x i32> @test256_3(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1) nounwind
;
; NoVLX-LABEL: test256_3:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k1
; NoVLX-NEXT: vpblendmd %zmm2, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%mask = icmp sge <8 x i32> %x, %y
%max = select <8 x i1> %mask, <8 x i32> %x1, <8 x i32> %y
@@ -86,12 +86,12 @@ define <8 x i32> @test256_5(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwin
;
; NoVLX-LABEL: test256_5:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %x, %y
@@ -108,12 +108,12 @@ define <8 x i32> @test256_5b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
;
; NoVLX-LABEL: test256_5b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpeqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp eq <8 x i32> %y, %x
@@ -130,12 +130,12 @@ define <8 x i32> @test256_6(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
;
; NoVLX-LABEL: test256_6:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sgt <8 x i32> %x, %y
@@ -152,12 +152,12 @@ define <8 x i32> @test256_6b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
;
; NoVLX-LABEL: test256_6b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpgtd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp slt <8 x i32> %y, %x
@@ -174,12 +174,12 @@ define <8 x i32> @test256_7(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
;
; NoVLX-LABEL: test256_7:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sle <8 x i32> %x, %y
@@ -196,12 +196,12 @@ define <8 x i32> @test256_7b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
;
; NoVLX-LABEL: test256_7b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp sge <8 x i32> %y, %x
@@ -218,12 +218,12 @@ define <8 x i32> @test256_8(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) noun
;
; NoVLX-LABEL: test256_8:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpleud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp ule <8 x i32> %x, %y
@@ -240,12 +240,12 @@ define <8 x i32> @test256_8b(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %y.ptr) nou
;
; NoVLX-LABEL: test256_8b:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
%mask = icmp uge <8 x i32> %y, %x
@@ -263,14 +263,14 @@ define <8 x i32> @test256_9(<8 x i32> %x, <8 x i32> %y, <8 x i32> %x1, <8 x i32>
;
; NoVLX-LABEL: test256_9:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm3<def> %ymm3<kill> %zmm3<def>
-; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; NoVLX-NEXT: vpcmpeqd %zmm3, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%mask1 = icmp eq <8 x i32> %x1, %y1
%mask0 = icmp eq <8 x i32> %x, %y
@@ -336,14 +336,14 @@ define <8 x i32> @test256_12(<8 x i32> %x, <8 x i32>* %y.ptr, <8 x i32> %x1, <8
;
; NoVLX-LABEL: test256_12:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm3
; NoVLX-NEXT: vpcmpleud %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%y = load <8 x i32>, <8 x i32>* %y.ptr, align 4
@@ -383,12 +383,12 @@ define <8 x i32> @test256_14(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1) nounwind
;
; NoVLX-LABEL: test256_14:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm2
; NoVLX-NEXT: vpcmpled %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%yb = load i32, i32* %yb.ptr, align 4
%y.0 = insertelement <8 x i32> undef, i32 %yb, i32 0
@@ -408,14 +408,14 @@ define <8 x i32> @test256_15(<8 x i32> %x, i32* %yb.ptr, <8 x i32> %x1, <8 x i32
;
; NoVLX-LABEL: test256_15:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm3
; NoVLX-NEXT: vpcmpgtd %zmm3, %zmm0, %k1
; NoVLX-NEXT: vpcmpled %zmm1, %zmm2, %k1 {%k1}
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%mask1 = icmp sge <8 x i32> %x1, %y1
%yb = load i32, i32* %yb.ptr, align 4
@@ -462,12 +462,12 @@ define <8 x i32> @test256_17(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
;
; NoVLX-LABEL: test256_17:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpneqd %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %x, %y
@@ -484,12 +484,12 @@ define <8 x i32> @test256_18(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
;
; NoVLX-LABEL: test256_18:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpneqd %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp ne <8 x i32> %y, %x
@@ -506,12 +506,12 @@ define <8 x i32> @test256_19(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
;
; NoVLX-LABEL: test256_19:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm2, %zmm0, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %x, %y
@@ -528,12 +528,12 @@ define <8 x i32> @test256_20(<8 x i32> %x, <8 x i32> %x1, <8 x i32>* %yp) nounwi
;
; NoVLX-LABEL: test256_20:
; NoVLX: # %bb.0:
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqu (%rdi), %ymm2
; NoVLX-NEXT: vpcmpnltud %zmm0, %zmm2, %k1
; NoVLX-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; NoVLX-NEXT: retq
%y = load <8 x i32>, <8 x i32>* %yp, align 4
%mask = icmp uge <8 x i32> %y, %x
diff --git a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
index 7969a9ff1df..7900812aae9 100644
--- a/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
+++ b/test/CodeGen/X86/avx512vl-vec-masked-cmp.ll
@@ -1208,7 +1208,7 @@ define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask:
@@ -1218,7 +1218,7 @@ define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1235,7 +1235,7 @@ define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqw_v8i1_v16i1_mask_mem:
@@ -1245,7 +1245,7 @@ define zeroext i16 @test_vpcmpeqw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1264,7 +1264,7 @@ define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask:
@@ -1275,7 +1275,7 @@ define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x i
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -1295,7 +1295,7 @@ define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqw_v8i1_v16i1_mask_mem:
@@ -1306,7 +1306,7 @@ define zeroext i16 @test_masked_vpcmpeqw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4200,7 +4200,7 @@ define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask:
@@ -4239,7 +4239,7 @@ define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4256,7 +4256,7 @@ define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem:
@@ -4295,7 +4295,7 @@ define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4314,7 +4314,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask:
@@ -4371,7 +4371,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i64
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4392,7 +4392,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem:
@@ -4449,7 +4449,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4471,7 +4471,7 @@ define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v8i1_mask_mem_b:
@@ -4511,7 +4511,7 @@ define zeroext i8 @test_vpcmpeqd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4531,7 +4531,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b:
@@ -4589,7 +4589,7 @@ define zeroext i8 @test_masked_vpcmpeqd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4612,7 +4612,7 @@ define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask:
@@ -4650,7 +4650,7 @@ define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4667,7 +4667,7 @@ define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem:
@@ -4705,7 +4705,7 @@ define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4724,7 +4724,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask:
@@ -4780,7 +4780,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x i
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4801,7 +4801,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem:
@@ -4857,7 +4857,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4879,7 +4879,7 @@ define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v4i1_v16i1_mask_mem_b:
@@ -4918,7 +4918,7 @@ define zeroext i16 @test_vpcmpeqd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -4938,7 +4938,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b:
@@ -4995,7 +4995,7 @@ define zeroext i16 @test_masked_vpcmpeqd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5698,19 +5698,19 @@ define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5727,19 +5727,19 @@ define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5758,20 +5758,20 @@ define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5791,20 +5791,20 @@ define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5825,19 +5825,19 @@ define zeroext i16 @test_vpcmpeqd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5857,20 +5857,20 @@ define zeroext i16 @test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -5904,8 +5904,8 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__b
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -5977,7 +5977,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -6052,8 +6052,8 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x i
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -6129,7 +6129,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <4
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -6207,7 +6207,7 @@ define zeroext i32 @test_vpcmpeqd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -6283,7 +6283,7 @@ define zeroext i32 @test_masked_vpcmpeqd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -6362,8 +6362,8 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__b
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -6440,7 +6440,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -6520,8 +6520,8 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x i
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -6602,7 +6602,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <4
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -6685,7 +6685,7 @@ define zeroext i64 @test_vpcmpeqd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -6766,7 +6766,7 @@ define zeroext i64 @test_masked_vpcmpeqd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
@@ -8520,7 +8520,7 @@ define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask:
@@ -8543,7 +8543,7 @@ define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8560,7 +8560,7 @@ define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem:
@@ -8583,7 +8583,7 @@ define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>* %
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8602,7 +8602,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask:
@@ -8635,7 +8635,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i64
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8656,7 +8656,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem:
@@ -8689,7 +8689,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8711,7 +8711,7 @@ define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v8i1_mask_mem_b:
@@ -8735,7 +8735,7 @@ define zeroext i8 @test_vpcmpeqq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8755,7 +8755,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b:
@@ -8789,7 +8789,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8812,7 +8812,7 @@ define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask:
@@ -8834,7 +8834,7 @@ define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__b
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8851,7 +8851,7 @@ define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem:
@@ -8873,7 +8873,7 @@ define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8892,7 +8892,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask:
@@ -8924,7 +8924,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x i
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8945,7 +8945,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem:
@@ -8977,7 +8977,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -8999,7 +8999,7 @@ define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpeqq_v2i1_v16i1_mask_mem_b:
@@ -9022,7 +9022,7 @@ define zeroext i16 @test_vpcmpeqq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9042,7 +9042,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b:
@@ -9075,7 +9075,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9730,7 +9730,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -9771,7 +9771,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9788,7 +9788,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -9829,7 +9829,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>* %
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9848,7 +9848,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -9907,7 +9907,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i64
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -9928,7 +9928,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -9987,7 +9987,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4 x
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10009,7 +10009,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10051,7 +10051,7 @@ define zeroext i8 @test_vpcmpeqq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10071,7 +10071,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10131,7 +10131,7 @@ define zeroext i8 @test_masked_vpcmpeqq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <4
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10154,7 +10154,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10194,7 +10194,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__b
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10211,7 +10211,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10251,7 +10251,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10270,7 +10270,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10328,7 +10328,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x i
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10349,7 +10349,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10407,7 +10407,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <4
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10429,7 +10429,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10470,7 +10470,7 @@ define zeroext i16 @test_vpcmpeqq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -10490,7 +10490,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -10549,7 +10549,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11276,7 +11276,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -11284,7 +11284,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__b
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11301,7 +11301,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -11309,7 +11309,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>*
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11328,7 +11328,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -11337,7 +11337,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x i
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11357,7 +11357,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -11366,7 +11366,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <8
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11387,7 +11387,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -11395,7 +11395,7 @@ define zeroext i16 @test_vpcmpeqq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %__
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpeqq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -11415,7 +11415,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -11424,7 +11424,7 @@ define zeroext i16 @test_masked_vpcmpeqq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpeqq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13569,7 +13569,7 @@ define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask:
@@ -13579,7 +13579,7 @@ define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13596,7 +13596,7 @@ define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtw_v8i1_v16i1_mask_mem:
@@ -13606,7 +13606,7 @@ define zeroext i16 @test_vpcmpsgtw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13625,7 +13625,7 @@ define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask:
@@ -13636,7 +13636,7 @@ define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -13656,7 +13656,7 @@ define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem:
@@ -13667,7 +13667,7 @@ define zeroext i16 @test_masked_vpcmpsgtw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16561,7 +16561,7 @@ define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask:
@@ -16600,7 +16600,7 @@ define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16617,7 +16617,7 @@ define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem:
@@ -16656,7 +16656,7 @@ define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16675,7 +16675,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask:
@@ -16732,7 +16732,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16753,7 +16753,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem:
@@ -16810,7 +16810,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16832,7 +16832,7 @@ define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
@@ -16872,7 +16872,7 @@ define zeroext i8 @test_vpcmpsgtd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16892,7 +16892,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b:
@@ -16950,7 +16950,7 @@ define zeroext i8 @test_masked_vpcmpsgtd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -16973,7 +16973,7 @@ define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask:
@@ -17011,7 +17011,7 @@ define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -17028,7 +17028,7 @@ define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem:
@@ -17066,7 +17066,7 @@ define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -17085,7 +17085,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask:
@@ -17141,7 +17141,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -17162,7 +17162,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem:
@@ -17218,7 +17218,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -17240,7 +17240,7 @@ define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
@@ -17279,7 +17279,7 @@ define zeroext i16 @test_vpcmpsgtd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -17299,7 +17299,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b:
@@ -17356,7 +17356,7 @@ define zeroext i16 @test_masked_vpcmpsgtd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18059,19 +18059,19 @@ define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18088,19 +18088,19 @@ define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18119,20 +18119,20 @@ define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18152,20 +18152,20 @@ define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18186,19 +18186,19 @@ define zeroext i16 @test_vpcmpsgtd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18218,20 +18218,20 @@ define zeroext i16 @test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -18265,8 +18265,8 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -18338,7 +18338,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -18413,8 +18413,8 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -18490,7 +18490,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -18568,7 +18568,7 @@ define zeroext i32 @test_vpcmpsgtd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -18644,7 +18644,7 @@ define zeroext i32 @test_masked_vpcmpsgtd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -18723,8 +18723,8 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -18801,7 +18801,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -18881,8 +18881,8 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -18963,7 +18963,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -19046,7 +19046,7 @@ define zeroext i64 @test_vpcmpsgtd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -19127,7 +19127,7 @@ define zeroext i64 @test_masked_vpcmpsgtd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
@@ -20881,7 +20881,7 @@ define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask:
@@ -20904,7 +20904,7 @@ define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20921,7 +20921,7 @@ define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem:
@@ -20944,7 +20944,7 @@ define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -20963,7 +20963,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask:
@@ -20996,7 +20996,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21017,7 +21017,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem:
@@ -21050,7 +21050,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21072,7 +21072,7 @@ define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
@@ -21096,7 +21096,7 @@ define zeroext i8 @test_vpcmpsgtq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21116,7 +21116,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b:
@@ -21150,7 +21150,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21173,7 +21173,7 @@ define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask:
@@ -21195,7 +21195,7 @@ define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21212,7 +21212,7 @@ define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem:
@@ -21234,7 +21234,7 @@ define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21253,7 +21253,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask:
@@ -21285,7 +21285,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21306,7 +21306,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem:
@@ -21338,7 +21338,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21360,7 +21360,7 @@ define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
@@ -21383,7 +21383,7 @@ define zeroext i16 @test_vpcmpsgtq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -21403,7 +21403,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b:
@@ -21436,7 +21436,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22091,7 +22091,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22132,7 +22132,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22149,7 +22149,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22190,7 +22190,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22209,7 +22209,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22268,7 +22268,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22289,7 +22289,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22348,7 +22348,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22370,7 +22370,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22412,7 +22412,7 @@ define zeroext i8 @test_vpcmpsgtq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22432,7 +22432,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22492,7 +22492,7 @@ define zeroext i8 @test_masked_vpcmpsgtq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22515,7 +22515,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22555,7 +22555,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22572,7 +22572,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22612,7 +22612,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22631,7 +22631,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22689,7 +22689,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22710,7 +22710,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22768,7 +22768,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22790,7 +22790,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22831,7 +22831,7 @@ define zeroext i16 @test_vpcmpsgtq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -22851,7 +22851,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -22910,7 +22910,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23637,7 +23637,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -23645,7 +23645,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23662,7 +23662,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -23670,7 +23670,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23689,7 +23689,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -23698,7 +23698,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23718,7 +23718,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -23727,7 +23727,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23748,7 +23748,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -23756,7 +23756,7 @@ define zeroext i16 @test_vpcmpsgtq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpgtq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -23776,7 +23776,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -23785,7 +23785,7 @@ define zeroext i16 @test_masked_vpcmpsgtq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpgtq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25960,7 +25960,7 @@ define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask:
@@ -25972,7 +25972,7 @@ define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -25989,7 +25989,7 @@ define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgew_v8i1_v16i1_mask_mem:
@@ -26002,7 +26002,7 @@ define zeroext i16 @test_vpcmpsgew_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -26021,7 +26021,7 @@ define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmplew %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask:
@@ -26034,7 +26034,7 @@ define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -26054,7 +26054,7 @@ define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgew_v8i1_v16i1_mask_mem:
@@ -26068,7 +26068,7 @@ define zeroext i16 @test_masked_vpcmpsgew_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29018,7 +29018,7 @@ define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask:
@@ -29059,7 +29059,7 @@ define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29076,7 +29076,7 @@ define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem:
@@ -29118,7 +29118,7 @@ define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29137,7 +29137,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask:
@@ -29194,7 +29194,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29215,7 +29215,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem:
@@ -29273,7 +29273,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29295,7 +29295,7 @@ define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v8i1_mask_mem_b:
@@ -29337,7 +29337,7 @@ define zeroext i8 @test_vpcmpsged_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29357,7 +29357,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b:
@@ -29415,7 +29415,7 @@ define zeroext i8 @test_masked_vpcmpsged_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29438,7 +29438,7 @@ define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask:
@@ -29478,7 +29478,7 @@ define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29495,7 +29495,7 @@ define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem:
@@ -29536,7 +29536,7 @@ define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29555,7 +29555,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask:
@@ -29611,7 +29611,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29632,7 +29632,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem:
@@ -29689,7 +29689,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29711,7 +29711,7 @@ define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v4i1_v16i1_mask_mem_b:
@@ -29752,7 +29752,7 @@ define zeroext i16 @test_vpcmpsged_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -29772,7 +29772,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b:
@@ -29829,7 +29829,7 @@ define zeroext i16 @test_masked_vpcmpsged_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30548,19 +30548,19 @@ define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30577,19 +30577,19 @@ define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30608,20 +30608,20 @@ define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpled %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30641,20 +30641,20 @@ define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30675,19 +30675,19 @@ define zeroext i16 @test_vpcmpsged_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltd (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsged_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30707,20 +30707,20 @@ define zeroext i16 @test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltd (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsged_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -30754,8 +30754,8 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -30827,7 +30827,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -30902,8 +30902,8 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -30979,7 +30979,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -31057,7 +31057,7 @@ define zeroext i32 @test_vpcmpsged_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -31133,7 +31133,7 @@ define zeroext i32 @test_masked_vpcmpsged_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -31212,8 +31212,8 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -31290,7 +31290,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -31370,8 +31370,8 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -31452,7 +31452,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -31535,7 +31535,7 @@ define zeroext i64 @test_vpcmpsged_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -31616,7 +31616,7 @@ define zeroext i64 @test_masked_vpcmpsged_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpled %zmm0, %zmm1, %k0 {%k1}
@@ -33378,7 +33378,7 @@ define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask:
@@ -33403,7 +33403,7 @@ define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33420,7 +33420,7 @@ define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem:
@@ -33446,7 +33446,7 @@ define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33465,7 +33465,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask:
@@ -33498,7 +33498,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33519,7 +33519,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem:
@@ -33553,7 +33553,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33575,7 +33575,7 @@ define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
@@ -33601,7 +33601,7 @@ define zeroext i8 @test_vpcmpsgeq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33621,7 +33621,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b:
@@ -33655,7 +33655,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33678,7 +33678,7 @@ define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask:
@@ -33702,7 +33702,7 @@ define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33719,7 +33719,7 @@ define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem:
@@ -33744,7 +33744,7 @@ define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33763,7 +33763,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %xmm0, %xmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask:
@@ -33795,7 +33795,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33816,7 +33816,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem:
@@ -33849,7 +33849,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33871,7 +33871,7 @@ define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
@@ -33896,7 +33896,7 @@ define zeroext i16 @test_vpcmpsgeq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -33916,7 +33916,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b:
@@ -33949,7 +33949,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -34620,7 +34620,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -34663,7 +34663,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -34680,7 +34680,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -34724,7 +34724,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -34743,7 +34743,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -34804,7 +34804,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -34825,7 +34825,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -34887,7 +34887,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -34909,7 +34909,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -34953,7 +34953,7 @@ define zeroext i8 @test_vpcmpsgeq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -34973,7 +34973,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35035,7 +35035,7 @@ define zeroext i8 @test_masked_vpcmpsgeq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -35058,7 +35058,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35100,7 +35100,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -35117,7 +35117,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35160,7 +35160,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -35179,7 +35179,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %ymm0, %ymm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35239,7 +35239,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -35260,7 +35260,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35321,7 +35321,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -35343,7 +35343,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35386,7 +35386,7 @@ define zeroext i16 @test_vpcmpsgeq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -35406,7 +35406,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -35467,7 +35467,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -36222,7 +36222,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -36230,7 +36230,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -36247,7 +36247,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -36255,7 +36255,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -36274,7 +36274,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -36283,7 +36283,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpleq %zmm0, %zmm1, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -36303,7 +36303,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -36312,7 +36312,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -36333,7 +36333,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -36341,7 +36341,7 @@ define zeroext i16 @test_vpcmpsgeq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpnltq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -36361,7 +36361,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -36370,7 +36370,7 @@ define zeroext i16 @test_masked_vpcmpsgeq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpnltq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -38551,7 +38551,7 @@ define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask:
@@ -38564,7 +38564,7 @@ define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -38581,7 +38581,7 @@ define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuw (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultw_v8i1_v16i1_mask_mem:
@@ -38594,7 +38594,7 @@ define zeroext i16 @test_vpcmpultw_v8i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpsllq $63, %zmm0, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -38613,7 +38613,7 @@ define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask:
@@ -38627,7 +38627,7 @@ define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -38647,7 +38647,7 @@ define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuw (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultw_v8i1_v16i1_mask_mem:
@@ -38661,7 +38661,7 @@ define zeroext i16 @test_masked_vpcmpultw_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -41623,7 +41623,7 @@ define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask:
@@ -41665,7 +41665,7 @@ define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -41682,7 +41682,7 @@ define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem:
@@ -41724,7 +41724,7 @@ define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -41743,7 +41743,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask:
@@ -41803,7 +41803,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -41824,7 +41824,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem:
@@ -41884,7 +41884,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -41906,7 +41906,7 @@ define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v8i1_mask_mem_b:
@@ -41949,7 +41949,7 @@ define zeroext i8 @test_vpcmpultd_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, i32* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -41969,7 +41969,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b:
@@ -42030,7 +42030,7 @@ define zeroext i8 @test_masked_vpcmpultd_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -42053,7 +42053,7 @@ define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask:
@@ -42094,7 +42094,7 @@ define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -42111,7 +42111,7 @@ define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem:
@@ -42152,7 +42152,7 @@ define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -42171,7 +42171,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask:
@@ -42230,7 +42230,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -42251,7 +42251,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem:
@@ -42310,7 +42310,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -42332,7 +42332,7 @@ define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v4i1_v16i1_mask_mem_b:
@@ -42374,7 +42374,7 @@ define zeroext i16 @test_vpcmpultd_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, i32* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -42394,7 +42394,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b:
@@ -42454,7 +42454,7 @@ define zeroext i16 @test_masked_vpcmpultd_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43193,19 +43193,19 @@ define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43222,19 +43222,19 @@ define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43253,20 +43253,20 @@ define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43286,20 +43286,20 @@ define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43320,19 +43320,19 @@ define zeroext i16 @test_vpcmpultd_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, i32* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltud (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43352,20 +43352,20 @@ define zeroext i16 @test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltud (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultd_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -43399,8 +43399,8 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -43472,7 +43472,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -43547,8 +43547,8 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -43624,7 +43624,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -43702,7 +43702,7 @@ define zeroext i32 @test_vpcmpultd_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, i32* %_
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -43778,7 +43778,7 @@ define zeroext i32 @test_masked_vpcmpultd_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -43857,8 +43857,8 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -43935,7 +43935,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -44015,8 +44015,8 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -44097,7 +44097,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovdqa (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -44180,7 +44180,7 @@ define zeroext i64 @test_vpcmpultd_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, i32* %_
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rdi), %ymm1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -44261,7 +44261,7 @@ define zeroext i64 @test_masked_vpcmpultd_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vpbroadcastd (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltud %zmm1, %zmm0, %k0 {%k1}
@@ -46033,7 +46033,7 @@ define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask:
@@ -46059,7 +46059,7 @@ define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46076,7 +46076,7 @@ define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem:
@@ -46102,7 +46102,7 @@ define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46121,7 +46121,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask:
@@ -46157,7 +46157,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask(i8 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46178,7 +46178,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem:
@@ -46214,7 +46214,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem(i8 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46236,7 +46236,7 @@ define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v8i1_mask_mem_b:
@@ -46263,7 +46263,7 @@ define zeroext i8 @test_vpcmpultq_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, i64* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46283,7 +46283,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b:
@@ -46320,7 +46320,7 @@ define zeroext i8 @test_masked_vpcmpultq_v2i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46343,7 +46343,7 @@ define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask:
@@ -46368,7 +46368,7 @@ define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46385,7 +46385,7 @@ define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem:
@@ -46410,7 +46410,7 @@ define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46429,7 +46429,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask:
@@ -46464,7 +46464,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask(i8 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46485,7 +46485,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem:
@@ -46520,7 +46520,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46542,7 +46542,7 @@ define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vpcmpultq_v2i1_v16i1_mask_mem_b:
@@ -46568,7 +46568,7 @@ define zeroext i16 @test_vpcmpultq_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, i64* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -46588,7 +46588,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b:
@@ -46624,7 +46624,7 @@ define zeroext i16 @test_masked_vpcmpultq_v2i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47315,7 +47315,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47359,7 +47359,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47376,7 +47376,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47420,7 +47420,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47439,7 +47439,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47501,7 +47501,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask(i8 zeroext %__u, <4 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47522,7 +47522,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47584,7 +47584,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem(i8 zeroext %__u, <4
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47606,7 +47606,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47651,7 +47651,7 @@ define zeroext i8 @test_vpcmpultq_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, i64* %__b
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47671,7 +47671,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47734,7 +47734,7 @@ define zeroext i8 @test_masked_vpcmpultq_v4i1_v8i1_mask_mem_b(i8 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47757,7 +47757,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47800,7 +47800,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47817,7 +47817,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47860,7 +47860,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47879,7 +47879,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -47940,7 +47940,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -47961,7 +47961,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -48022,7 +48022,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -48044,7 +48044,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -48088,7 +48088,7 @@ define zeroext i16 @test_vpcmpultq_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, i64* %_
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -48108,7 +48108,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -48170,7 +48170,7 @@ define zeroext i16 @test_masked_vpcmpultq_v4i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -48933,7 +48933,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -48941,7 +48941,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -48958,7 +48958,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -48966,7 +48966,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -48985,7 +48985,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -48994,7 +48994,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -49014,7 +49014,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -49023,7 +49023,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -49044,7 +49044,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -49052,7 +49052,7 @@ define zeroext i16 @test_vpcmpultq_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, i64* %_
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vpcmpltuq (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -49072,7 +49072,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -49081,7 +49081,7 @@ define zeroext i16 @test_masked_vpcmpultq_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vpcmpltuq (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50026,7 +50026,7 @@ define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask:
@@ -50065,7 +50065,7 @@ define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50082,7 +50082,7 @@ define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem:
@@ -50121,7 +50121,7 @@ define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50139,7 +50139,7 @@ define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, float* %_
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v8i1_mask_mem_b:
@@ -50179,7 +50179,7 @@ define zeroext i8 @test_vcmpoeqps_v4i1_v8i1_mask_mem_b(<2 x i64> %__a, float* %_
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50200,7 +50200,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask(i4 zeroext %__u, <2 x i6
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask:
@@ -50244,7 +50244,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask(i4 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50265,7 +50265,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem(i4 zeroext %__u, <2
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem:
@@ -50309,7 +50309,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem(i4 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50331,7 +50331,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b:
@@ -50376,7 +50376,7 @@ define zeroext i8 @test_masked_vcmpoeqps_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50399,7 +50399,7 @@ define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask:
@@ -50437,7 +50437,7 @@ define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50454,7 +50454,7 @@ define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem:
@@ -50492,7 +50492,7 @@ define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50510,7 +50510,7 @@ define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, float*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to4}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v4i1_v16i1_mask_mem_b:
@@ -50549,7 +50549,7 @@ define zeroext i16 @test_vcmpoeqps_v4i1_v16i1_mask_mem_b(<2 x i64> %__a, float*
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50570,7 +50570,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask(i4 zeroext %__u, <2 x
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask:
@@ -50613,7 +50613,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask(i4 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50634,7 +50634,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem:
@@ -50677,7 +50677,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem(i4 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -50699,7 +50699,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqps (%rsi){1to4}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b:
@@ -50743,7 +50743,7 @@ define zeroext i16 @test_masked_vcmpoeqps_v4i1_v16i1_mask_mem_b(i4 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51368,19 +51368,19 @@ define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51397,19 +51397,19 @@ define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51427,19 +51427,19 @@ define zeroext i16 @test_vcmpoeqps_v8i1_v16i1_mask_mem_b(<4 x i64> %__a, float*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqps (%rdi){1to8}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqps_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51459,20 +51459,20 @@ define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask(i8 zeroext %__u, <4 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51492,20 +51492,20 @@ define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51526,20 +51526,20 @@ define zeroext i16 @test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqps (%rsi){1to8}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqps_v8i1_v16i1_mask_mem_b:
; NoVLX: # %bb.0: # %entry
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kshiftlw $8, %k0, %k0
; NoVLX-NEXT: kshiftrw $8, %k0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -51574,8 +51574,8 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -51647,7 +51647,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -51721,7 +51721,7 @@ define zeroext i32 @test_vcmpoeqps_v8i1_v32i1_mask_mem_b(<4 x i64> %__a, float*
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -51797,8 +51797,8 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -51874,7 +51874,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -51952,7 +51952,7 @@ define zeroext i32 @test_masked_vcmpoeqps_v8i1_v32i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $32, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -52032,8 +52032,8 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
; NoVLX-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
@@ -52110,7 +52110,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovaps (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -52189,7 +52189,7 @@ define zeroext i64 @test_vcmpoeqps_v8i1_v64i1_mask_mem_b(<4 x i64> %__a, float*
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vbroadcastss (%rdi), %ymm1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -52270,8 +52270,8 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask(i8 zeroext %__u, <4 x
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kxorw %k0, %k0, %k1
@@ -52352,7 +52352,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vmovaps (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -52435,7 +52435,7 @@ define zeroext i64 @test_masked_vcmpoeqps_v8i1_v64i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: .cfi_def_cfa_register %rbp
; NoVLX-NEXT: andq $-32, %rsp
; NoVLX-NEXT: subq $64, %rsp
-; NoVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; NoVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; NoVLX-NEXT: vbroadcastss (%rsi), %ymm1
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqps %zmm1, %zmm0, %k0 {%k1}
@@ -54269,7 +54269,7 @@ define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask:
@@ -54292,7 +54292,7 @@ define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask(<2 x i64> %__a, <2 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54309,7 +54309,7 @@ define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem:
@@ -54332,7 +54332,7 @@ define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem(<2 x i64> %__a, <2 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54350,7 +54350,7 @@ define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, double* %
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
@@ -54374,7 +54374,7 @@ define zeroext i8 @test_vcmpoeqpd_v2i1_v8i1_mask_mem_b(<2 x i64> %__a, double* %
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54395,7 +54395,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask(i2 zeroext %__u, <2 x i6
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask:
@@ -54423,7 +54423,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask(i2 zeroext %__u, <2 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54444,7 +54444,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem(i2 zeroext %__u, <2
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem:
@@ -54472,7 +54472,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem(i2 zeroext %__u, <2
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54494,7 +54494,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b(i2 zeroext %__u, <
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b:
@@ -54523,7 +54523,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v2i1_v8i1_mask_mem_b(i2 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54546,7 +54546,7 @@ define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask:
@@ -54568,7 +54568,7 @@ define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask(<2 x i64> %__a, <2 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54585,7 +54585,7 @@ define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem:
@@ -54607,7 +54607,7 @@ define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem(<2 x i64> %__a, <2 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54625,7 +54625,7 @@ define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, double*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to2}, %xmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
@@ -54648,7 +54648,7 @@ define zeroext i16 @test_vcmpoeqpd_v2i1_v16i1_mask_mem_b(<2 x i64> %__a, double*
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54669,7 +54669,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask(i2 zeroext %__u, <2 x
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %xmm1, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask:
@@ -54696,7 +54696,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask(i2 zeroext %__u, <2 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54717,7 +54717,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem(i2 zeroext %__u, <
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem:
@@ -54744,7 +54744,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem(i2 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -54766,7 +54766,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b(i2 zeroext %__u,
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to2}, %xmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: retq
;
; NoVLX-LABEL: test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b:
@@ -54794,7 +54794,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v2i1_v16i1_mask_mem_b(i2 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55419,7 +55419,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55460,7 +55460,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask(<4 x i64> %__a, <4 x i64> %__b)
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55477,7 +55477,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55518,7 +55518,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem(<4 x i64> %__a, <4 x i64>*
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55536,7 +55536,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55578,7 +55578,7 @@ define zeroext i8 @test_vcmpoeqpd_v4i1_v8i1_mask_mem_b(<4 x i64> %__a, double* %
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55599,7 +55599,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask(i4 zeroext %__u, <4 x i6
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55645,7 +55645,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask(i4 zeroext %__u, <4 x i6
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55666,7 +55666,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem(i4 zeroext %__u, <4
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55712,7 +55712,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem(i4 zeroext %__u, <4
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55734,7 +55734,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VLX-NEXT: # kill: def %al killed %al killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55781,7 +55781,7 @@ define zeroext i8 @test_masked_vcmpoeqpd_v4i1_v8i1_mask_mem_b(i4 zeroext %__u, <
; NoVLX-NEXT: vpsllq $63, %zmm2, %zmm0
; NoVLX-NEXT: vptestmq %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %al killed %al killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55804,7 +55804,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55844,7 +55844,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask(<4 x i64> %__a, <4 x i64> %__
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55861,7 +55861,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55901,7 +55901,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem(<4 x i64> %__a, <4 x i64>
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55919,7 +55919,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to4}, %ymm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -55960,7 +55960,7 @@ define zeroext i16 @test_vcmpoeqpd_v4i1_v16i1_mask_mem_b(<4 x i64> %__a, double*
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -55981,7 +55981,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask(i4 zeroext %__u, <4 x
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd %ymm1, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56026,7 +56026,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask(i4 zeroext %__u, <4 x
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56047,7 +56047,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem(i4 zeroext %__u, <
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi), %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56092,7 +56092,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem(i4 zeroext %__u, <
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56114,7 +56114,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b(i4 zeroext %__u,
; VLX-NEXT: kmovb -{{[0-9]+}}(%rsp), %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to4}, %ymm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56160,7 +56160,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v4i1_v16i1_mask_mem_b(i4 zeroext %__u,
; NoVLX-NEXT: vpslld $31, %zmm2, %zmm0
; NoVLX-NEXT: vptestmd %zmm0, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56809,7 +56809,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56817,7 +56817,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask(<8 x i64> %__a, <8 x i64> %__
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56834,7 +56834,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56842,7 +56842,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem(<8 x i64> %__a, <8 x i64>
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi), %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56860,7 +56860,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double*
; VLX: # %bb.0: # %entry
; VLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56868,7 +56868,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_mask_mem_b(<8 x i64> %__a, double*
; NoVLX: # %bb.0: # %entry
; NoVLX-NEXT: vcmpeqpd (%rdi){1to8}, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56888,7 +56888,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56897,7 +56897,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask(i8 zeroext %__u, <8 x
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56917,7 +56917,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56926,7 +56926,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem(i8 zeroext %__u, <
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi), %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56947,7 +56947,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; VLX-NEXT: kmovd %edi, %k1
; VLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56956,7 +56956,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_mask_mem_b(i8 zeroext %__u,
; NoVLX-NEXT: kmovw %edi, %k1
; NoVLX-NEXT: vcmpeqpd (%rsi){1to8}, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -56980,7 +56980,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64>
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -56989,7 +56989,7 @@ define zeroext i16 @test_vcmpoeqpd_v8i1_v16i1_sae_mask(<8 x i64> %__a, <8 x i64>
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
@@ -57007,7 +57007,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_sae_mask(i8 zeroext %__u, <
; VLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; VLX-NEXT: kmovd %k0, %eax
; VLX-NEXT: movzbl %al, %eax
-; VLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VLX-NEXT: # kill: def %ax killed %ax killed %eax
; VLX-NEXT: vzeroupper
; VLX-NEXT: retq
;
@@ -57017,7 +57017,7 @@ define zeroext i16 @test_masked_vcmpoeqpd_v8i1_v16i1_sae_mask(i8 zeroext %__u, <
; NoVLX-NEXT: vcmplepd {sae}, %zmm1, %zmm0, %k0 {%k1}
; NoVLX-NEXT: kmovw %k0, %eax
; NoVLX-NEXT: movzbl %al, %eax
-; NoVLX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NoVLX-NEXT: # kill: def %ax killed %ax killed %eax
; NoVLX-NEXT: vzeroupper
; NoVLX-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/avx512vl-vec-test-testn.ll b/test/CodeGen/X86/avx512vl-vec-test-testn.ll
index 65f9d9fc3d5..89791abdeea 100644
--- a/test/CodeGen/X86/avx512vl-vec-test-testn.ll
+++ b/test/CodeGen/X86/avx512vl-vec-test-testn.ll
@@ -8,14 +8,14 @@ define zeroext i8 @TEST_mm_test_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) local
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi64_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -31,14 +31,14 @@ define zeroext i8 @TEST_mm_test_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) local
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_test_epi32_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -55,7 +55,7 @@ define zeroext i8 @TEST_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) lo
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -63,7 +63,7 @@ define zeroext i8 @TEST_mm256_test_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) lo
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -80,7 +80,7 @@ define zeroext i8 @TEST_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) lo
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -88,7 +88,7 @@ define zeroext i8 @TEST_mm256_test_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) lo
; I386: # %bb.0: # %entry
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -106,7 +106,7 @@ define zeroext i8 @TEST_mm_mask_test_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i6
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi64_mask:
@@ -115,7 +115,7 @@ define zeroext i8 @TEST_mm_mask_test_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i6
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -135,7 +135,7 @@ define zeroext i8 @TEST_mm_mask_test_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i6
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_test_epi32_mask:
@@ -144,7 +144,7 @@ define zeroext i8 @TEST_mm_mask_test_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i6
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -166,7 +166,7 @@ define zeroext i8 @TEST_mm256_mask_test_epi64_mask(i8 %__U, <4 x i64> %__A, <4 x
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -176,7 +176,7 @@ define zeroext i8 @TEST_mm256_mask_test_epi64_mask(i8 %__U, <4 x i64> %__A, <4 x
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmq %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -197,7 +197,7 @@ define zeroext i8 @TEST_mm256_mask_test_epi32_mask(i8 %__U, <4 x i64> %__A, <4 x
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -207,7 +207,7 @@ define zeroext i8 @TEST_mm256_mask_test_epi32_mask(i8 %__U, <4 x i64> %__A, <4 x
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestmd %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -226,14 +226,14 @@ define zeroext i8 @TEST_mm_testn_epi64_mask(<2 x i64> %__A, <2 x i64> %__B) loca
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi64_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -249,14 +249,14 @@ define zeroext i8 @TEST_mm_testn_epi32_mask(<2 x i64> %__A, <2 x i64> %__B) loca
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_testn_epi32_mask:
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -273,7 +273,7 @@ define zeroext i8 @TEST_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) l
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -281,7 +281,7 @@ define zeroext i8 @TEST_mm256_testn_epi64_mask(<4 x i64> %__A, <4 x i64> %__B) l
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -298,7 +298,7 @@ define zeroext i8 @TEST_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) l
; X86_64: # %bb.0: # %entry
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -306,7 +306,7 @@ define zeroext i8 @TEST_mm256_testn_epi32_mask(<4 x i64> %__A, <4 x i64> %__B) l
; I386: # %bb.0: # %entry
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -324,7 +324,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi64_mask:
@@ -333,7 +333,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi64_mask(i8 %__U, <2 x i64> %__A, <2 x i
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -353,7 +353,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: retq
;
; I386-LABEL: TEST_mm_mask_testn_epi32_mask:
@@ -362,7 +362,7 @@ define zeroext i8 @TEST_mm_mask_testn_epi32_mask(i8 %__U, <2 x i64> %__A, <2 x i
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %xmm0, %xmm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: retl
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -384,7 +384,7 @@ define zeroext i8 @TEST_mm256_mask_testn_epi64_mask(i8 %__U, <4 x i64> %__A, <4
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -394,7 +394,7 @@ define zeroext i8 @TEST_mm256_mask_testn_epi64_mask(i8 %__U, <4 x i64> %__A, <4
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmq %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
@@ -415,7 +415,7 @@ define zeroext i8 @TEST_mm256_mask_testn_epi32_mask(i8 %__U, <4 x i64> %__A, <4
; X86_64-NEXT: kmovw %edi, %k1
; X86_64-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; X86_64-NEXT: kmovw %k0, %eax
-; X86_64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86_64-NEXT: # kill: def %al killed %al killed %eax
; X86_64-NEXT: vzeroupper
; X86_64-NEXT: retq
;
@@ -425,7 +425,7 @@ define zeroext i8 @TEST_mm256_mask_testn_epi32_mask(i8 %__U, <4 x i64> %__A, <4
; I386-NEXT: kmovw %eax, %k1
; I386-NEXT: vptestnmd %ymm0, %ymm1, %k0 {%k1}
; I386-NEXT: kmovw %k0, %eax
-; I386-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; I386-NEXT: # kill: def %al killed %al killed %eax
; I386-NEXT: vzeroupper
; I386-NEXT: retl
entry:
diff --git a/test/CodeGen/X86/bitcast-and-setcc-128.ll b/test/CodeGen/X86/bitcast-and-setcc-128.ll
index d65c789a2d5..2276e563453 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-128.ll
@@ -14,7 +14,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
@@ -24,7 +24,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
@@ -38,7 +38,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -47,7 +47,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i16> %a, %b
%x1 = icmp sgt <8 x i16> %c, %d
@@ -63,7 +63,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; SSE2-SSSE3-NEXT: pcmpgtd %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
@@ -72,7 +72,7 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
; AVX12-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
@@ -106,7 +106,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
; SSE2-SSSE3-NEXT: cmpltps %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andps %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskps %xmm3, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
@@ -115,7 +115,7 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d)
; AVX12-NEXT: vcmpltps %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
@@ -149,7 +149,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; SSE2-SSSE3-NEXT: pcmpgtb %xmm3, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
@@ -158,7 +158,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; AVX12-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %ax killed %ax killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
@@ -172,7 +172,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -181,7 +181,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtb %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i8> %a, %b
%x1 = icmp sgt <16 x i8> %c, %d
@@ -244,7 +244,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
@@ -273,7 +273,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
@@ -302,7 +302,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b, <2 x i8> %c, <2 x i8> %d) {
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
@@ -399,7 +399,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
@@ -428,7 +428,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
@@ -457,7 +457,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) {
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
@@ -546,7 +546,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm3, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
@@ -571,7 +571,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
@@ -596,7 +596,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) {
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
@@ -665,7 +665,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskpd %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
@@ -674,7 +674,7 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
; AVX12-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
@@ -708,7 +708,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
; SSE2-SSSE3-NEXT: cmpltpd %xmm2, %xmm3
; SSE2-SSSE3-NEXT: andpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: movmskpd %xmm3, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
@@ -717,7 +717,7 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double>
; AVX12-NEXT: vcmpltpd %xmm2, %xmm3, %xmm1
; AVX12-NEXT: vandpd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
@@ -759,7 +759,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
@@ -776,7 +776,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b, <4 x i8> %c, <4 x i8> %d) {
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
@@ -834,7 +834,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
@@ -851,7 +851,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b, <4 x i16> %c, <4 x i16> %d) {
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
@@ -910,7 +910,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
@@ -928,7 +928,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX12-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
@@ -950,7 +950,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -967,7 +967,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k1
; AVX512BW-NEXT: vpcmpgtw %xmm3, %xmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i8> %a, %b
%x1 = icmp sgt <8 x i8> %c, %d
diff --git a/test/CodeGen/X86/bitcast-and-setcc-256.ll b/test/CodeGen/X86/bitcast-and-setcc-256.ll
index 9cb5750d2cb..fdce65516e3 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -54,7 +54,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE2-SSSE3-NEXT: andps %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
@@ -71,7 +71,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -85,7 +85,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskps %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -126,7 +126,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE2-SSSE3-NEXT: andps %xmm2, %xmm6
; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
@@ -139,7 +139,7 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; AVX12-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -180,7 +180,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSE2-SSSE3-NEXT: packsswb %xmm5, %xmm4
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4
; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
@@ -197,7 +197,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -211,7 +211,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -226,7 +226,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -235,7 +235,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i16> %a, %b
@@ -257,7 +257,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm4
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm4
; SSE2-SSSE3-NEXT: pmovmskb %xmm4, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
@@ -275,7 +275,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -290,7 +290,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -299,7 +299,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -308,7 +308,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k1
; AVX512BW-NEXT: vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i32> %a, %b
@@ -330,7 +330,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm6
; SSE2-SSSE3-NEXT: pmovmskb %xmm6, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
@@ -344,7 +344,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -353,7 +353,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512F-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -362,7 +362,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k1
; AVX512BW-NEXT: vcmpltps %ymm2, %ymm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <8 x float> %a, %b
diff --git a/test/CodeGen/X86/bitcast-and-setcc-512.ll b/test/CodeGen/X86/bitcast-and-setcc-512.ll
index 79ef2cc13a8..fa59c744d2e 100644
--- a/test/CodeGen/X86/bitcast-and-setcc-512.ll
+++ b/test/CodeGen/X86/bitcast-and-setcc-512.ll
@@ -41,7 +41,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: packsswb %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
@@ -76,7 +76,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -104,7 +104,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -113,7 +113,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -122,7 +122,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <8 x i64> %a, %b
@@ -168,7 +168,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: packsswb %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX12-LABEL: v8f64:
@@ -195,7 +195,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -204,7 +204,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -213,7 +213,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltpd %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <8 x double> %a, %b
@@ -634,7 +634,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
@@ -663,7 +663,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -685,7 +685,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX2-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -694,7 +694,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -703,7 +703,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = icmp sgt <16 x i32> %a, %b
@@ -736,7 +736,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; SSE-NEXT: packsswb %xmm10, %xmm8
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX12-LABEL: v16f32:
@@ -757,7 +757,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; AVX12-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %ax killed %ax killed %eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -766,7 +766,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512F-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -775,7 +775,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k1
; AVX512BW-NEXT: vcmpltps %zmm2, %zmm3, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x0 = fcmp ogt <16 x float> %a, %b
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index 92a4ebc8051..dcddb8e8264 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -12,7 +12,7 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -24,7 +24,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
;
; AVX1-LABEL: ext_i2_2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -34,7 +34,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
;
; AVX2-LABEL: ext_i2_2i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -49,7 +49,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i2 %a0 to <2 x i1>
@@ -93,7 +93,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
@@ -197,7 +197,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -215,7 +215,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -232,7 +232,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
@@ -247,7 +247,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
%2 = sext <4 x i1> %1 to <4 x i64>
@@ -422,7 +422,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -452,7 +452,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
@@ -476,7 +476,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
index 360ee6b15be..f88b540323c 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -13,7 +13,7 @@
define <2 x i64> @ext_i2_2i64(i2 %a0) {
; SSE2-SSSE3-LABEL: ext_i2_2i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -26,7 +26,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
;
; AVX1-LABEL: ext_i2_2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -37,7 +37,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
;
; AVX2-LABEL: ext_i2_2i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -53,7 +53,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -64,7 +64,7 @@ define <2 x i64> @ext_i2_2i64(i2 %a0) {
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512VLBW-NEXT: kmovd %eax, %k1
; AVX512VLBW-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512VLBW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%1 = bitcast i2 %a0 to <2 x i1>
@@ -110,7 +110,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -121,7 +121,7 @@ define <4 x i32> @ext_i4_4i32(i4 %a0) {
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512VLBW-NEXT: kmovd %eax, %k1
; AVX512VLBW-NEXT: vpbroadcastd {{.*}}(%rip), %ymm0 {%k1} {z}
-; AVX512VLBW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VLBW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
@@ -167,7 +167,7 @@ define <8 x i16> @ext_i8_8i16(i8 %a0) {
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -253,7 +253,7 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
define <4 x i64> @ext_i4_4i64(i4 %a0) {
; SSE2-SSSE3-LABEL: ext_i4_4i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -273,7 +273,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
;
; AVX1-LABEL: ext_i4_4i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -292,7 +292,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
;
; AVX2-LABEL: ext_i4_4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [1,2,4,8]
@@ -308,7 +308,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX512F-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i4_4i64:
@@ -318,7 +318,7 @@ define <4 x i64> @ext_i4_4i64(i4 %a0) {
; AVX512VLBW-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512VLBW-NEXT: kmovd %eax, %k1
; AVX512VLBW-NEXT: vpbroadcastq {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512VLBW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VLBW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VLBW-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
%2 = zext <4 x i1> %1 to <4 x i64>
@@ -373,7 +373,7 @@ define <8 x i32> @ext_i8_8i32(i8 %a0) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: kmovw %edi, %k1
; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VLBW-LABEL: ext_i8_8i32:
@@ -550,7 +550,7 @@ define <32 x i8> @ext_i32_32i8(i32 %a0) {
define <8 x i64> @ext_i8_8i64(i8 %a0) {
; SSE2-SSSE3-LABEL: ext_i8_8i64:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -584,7 +584,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
;
; AVX1-LABEL: ext_i8_8i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
@@ -612,7 +612,7 @@ define <8 x i64> @ext_i8_8i64(i8 %a0) {
;
; AVX2-LABEL: ext_i8_8i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} ymm0 = [1,2,4,8]
diff --git a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
index 50893949544..6d9f832d861 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -8,7 +8,7 @@
define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; SSE2-SSSE3-LABEL: bitcast_i2_2i1:
; SSE2-SSSE3: # %bb.0:
-; SSE2-SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-SSSE3-NEXT: movq %rdi, %xmm0
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
; SSE2-SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [1,2]
@@ -21,7 +21,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
;
; AVX1-LABEL: bitcast_i2_2i1:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: vmovq %rdi, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -32,7 +32,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
;
; AVX2-LABEL: bitcast_i2_2i1:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: vmovq %rdi, %xmm0
; AVX2-NEXT: vpbroadcastq %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2]
@@ -47,7 +47,7 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i2 %a0 to <2 x i1>
@@ -92,7 +92,7 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = bitcast i4 %a0 to <4 x i1>
diff --git a/test/CodeGen/X86/bitcast-int-to-vector.ll b/test/CodeGen/X86/bitcast-int-to-vector.ll
index 42383e6b9fe..64fcf3d4944 100644
--- a/test/CodeGen/X86/bitcast-int-to-vector.ll
+++ b/test/CodeGen/X86/bitcast-int-to-vector.ll
@@ -10,7 +10,7 @@ define i1 @foo(i64 %a) {
; X86-NEXT: flds {{[0-9]+}}(%esp)
; X86-NEXT: fucompp
; X86-NEXT: fnstsw %ax
-; X86-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
+; X86-NEXT: # kill: def %ah killed %ah killed %ax
; X86-NEXT: sahf
; X86-NEXT: setp %al
; X86-NEXT: retl
diff --git a/test/CodeGen/X86/bitcast-setcc-128.ll b/test/CodeGen/X86/bitcast-setcc-128.ll
index 4cd31137839..7d0381837b7 100644
--- a/test/CodeGen/X86/bitcast-setcc-128.ll
+++ b/test/CodeGen/X86/bitcast-setcc-128.ll
@@ -12,7 +12,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i16:
@@ -20,7 +20,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i16:
@@ -30,7 +30,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -38,7 +38,7 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i16> %a, %b
%res = bitcast <8 x i1> %x to i8
@@ -50,14 +50,14 @@ define i4 @v4i32(<4 x i32> %a, <4 x i32> %b) {
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i32:
; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i32:
@@ -85,14 +85,14 @@ define i4 @v4f32(<4 x float> %a, <4 x float> %b) {
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltps %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f32:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4f32:
@@ -120,14 +120,14 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: pcmpgtb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v16i8:
; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %ax killed %ax killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v16i8:
@@ -137,7 +137,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -145,7 +145,7 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtb %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i8> %a, %b
%res = bitcast <16 x i1> %x to i16
@@ -181,7 +181,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i8:
@@ -198,7 +198,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i8:
@@ -215,7 +215,7 @@ define i2 @v2i8(<2 x i8> %a, <2 x i8> %b) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i8:
@@ -275,7 +275,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i16:
@@ -292,7 +292,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i16:
@@ -309,7 +309,7 @@ define i2 @v2i16(<2 x i16> %a, <2 x i16> %b) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i16:
@@ -365,7 +365,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v2i32:
@@ -380,7 +380,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovmskpd %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: retq
;
; AVX2-LABEL: v2i32:
@@ -395,7 +395,7 @@ define i2 @v2i32(<2 x i32> %a, <2 x i32> %b) {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskpd %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: retq
;
; AVX512F-LABEL: v2i32:
@@ -441,14 +441,14 @@ define i2 @v2i64(<2 x i64> %a, <2 x i64> %b) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2i64:
; AVX12: # %bb.0:
; AVX12-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2i64:
@@ -476,14 +476,14 @@ define i2 @v2f64(<2 x double> %a, <2 x double> %b) {
; SSE2-SSSE3: # %bb.0:
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm1
; SSE2-SSSE3-NEXT: movmskpd %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v2f64:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
; AVX12-NEXT: vmovmskpd %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v2f64:
@@ -515,7 +515,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; SSE2-SSSE3-NEXT: psrad $24, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i8:
@@ -526,7 +526,7 @@ define i4 @v4i8(<4 x i8> %a, <4 x i8> %b) {
; AVX12-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i8:
@@ -566,7 +566,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; SSE2-SSSE3-NEXT: psrad $16, %xmm0
; SSE2-SSSE3-NEXT: pcmpgtd %xmm1, %xmm0
; SSE2-SSSE3-NEXT: movmskps %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4i16:
@@ -577,7 +577,7 @@ define i4 @v4i16(<4 x i16> %a, <4 x i16> %b) {
; AVX12-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX12-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v4i16:
@@ -618,7 +618,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; SSE2-SSSE3-NEXT: pcmpgtw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8i8:
@@ -630,7 +630,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; AVX12-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: retq
;
; AVX512F-LABEL: v8i8:
@@ -644,7 +644,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -656,7 +656,7 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b) {
; AVX512BW-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX512BW-NEXT: vpcmpgtw %xmm1, %xmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i8> %a, %b
%res = bitcast <8 x i1> %x to i8
diff --git a/test/CodeGen/X86/bitcast-setcc-256.ll b/test/CodeGen/X86/bitcast-setcc-256.ll
index a0a8a2e3ad7..48e28c9d26c 100644
--- a/test/CodeGen/X86/bitcast-setcc-256.ll
+++ b/test/CodeGen/X86/bitcast-setcc-256.ll
@@ -13,7 +13,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; SSE2-SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm1, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %ax killed %ax killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v16i16:
@@ -24,7 +24,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -34,7 +34,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -45,7 +45,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -53,7 +53,7 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i16> %a, %b
@@ -69,7 +69,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; SSE2-SSSE3-NEXT: packssdw %xmm1, %xmm0
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm0
; SSE2-SSSE3-NEXT: pmovmskb %xmm0, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v8i32:
@@ -80,7 +80,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -88,7 +88,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -96,7 +96,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -104,7 +104,7 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %ymm1, %ymm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i32> %a, %b
@@ -120,14 +120,14 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: packsswb %xmm0, %xmm2
; SSE2-SSSE3-NEXT: pmovmskb %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v8f32:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskps %ymm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
@@ -135,7 +135,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -143,7 +143,7 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %ymm0, %ymm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <8 x float> %a, %b
@@ -244,7 +244,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; SSE2-SSSE3-NEXT: por %xmm0, %xmm1
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm1
; SSE2-SSSE3-NEXT: movmskps %xmm1, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX1-LABEL: v4i64:
@@ -255,7 +255,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovmskpd %ymm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -263,7 +263,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vmovmskpd %ymm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -296,14 +296,14 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b) {
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-SSSE3-NEXT: # kill: def %al killed %al killed %eax
; SSE2-SSSE3-NEXT: retq
;
; AVX12-LABEL: v4f64:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vmovmskpd %ymm0, %eax
-; AVX12-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX12-NEXT: # kill: def %al killed %al killed %eax
; AVX12-NEXT: vzeroupper
; AVX12-NEXT: retq
;
diff --git a/test/CodeGen/X86/bitcast-setcc-512.ll b/test/CodeGen/X86/bitcast-setcc-512.ll
index 7779fa00e5f..371aef00c8d 100644
--- a/test/CodeGen/X86/bitcast-setcc-512.ll
+++ b/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -228,7 +228,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v16i32:
@@ -245,7 +245,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -258,7 +258,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -266,7 +266,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -274,7 +274,7 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <16 x i32> %a, %b
@@ -293,7 +293,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v16f32:
@@ -306,7 +306,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -319,7 +319,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -327,7 +327,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -335,7 +335,7 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <16 x float> %a, %b
@@ -1047,7 +1047,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v8i64:
@@ -1064,7 +1064,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1075,7 +1075,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1083,7 +1083,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1091,7 +1091,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = icmp sgt <8 x i64> %a, %b
@@ -1111,7 +1111,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; SSE-NEXT: packssdw %xmm6, %xmm4
; SSE-NEXT: packsswb %xmm0, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: v8f64:
@@ -1124,7 +1124,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1135,7 +1135,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1143,7 +1143,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1151,7 +1151,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512BW-NEXT: # kill: def %al killed %al killed %eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%x = fcmp ogt <8 x double> %a, %b
diff --git a/test/CodeGen/X86/bitreverse.ll b/test/CodeGen/X86/bitreverse.ll
index ffb93b4453c..a393db30c9f 100644
--- a/test/CodeGen/X86/bitreverse.ll
+++ b/test/CodeGen/X86/bitreverse.ll
@@ -46,8 +46,8 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; X86-NEXT: andl $43690, %ecx # imm = 0xAAAA
; X86-NEXT: shrl %ecx
; X86-NEXT: leal (%ecx,%edx,2), %edx
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; X86-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
+; X86-NEXT: # kill: def %dx killed %dx killed %edx
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_v2i16:
@@ -191,7 +191,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
;
; X64-LABEL: test_bitreverse_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -242,7 +242,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
;
; X64-LABEL: test_bitreverse_i24:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: bswapl %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -289,12 +289,12 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X86-NEXT: andl $43690, %eax # imm = 0xAAAA
; X86-NEXT: shrl %eax
; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_bitreverse_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: rolw $8, %di
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -312,7 +312,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X64-NEXT: andl $43690, %eax # imm = 0xAAAA
; X64-NEXT: shrl %eax
; X64-NEXT: leal (%rax,%rcx,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
diff --git a/test/CodeGen/X86/bmi-schedule.ll b/test/CodeGen/X86/bmi-schedule.ll
index 58ed1dc8565..59817ad504a 100644
--- a/test/CodeGen/X86/bmi-schedule.ll
+++ b/test/CodeGen/X86/bmi-schedule.ll
@@ -14,7 +14,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; GENERIC-NEXT: notl %edi # sched: [1:0.33]
; GENERIC-NEXT: andw (%rdx), %di # sched: [6:0.50]
; GENERIC-NEXT: addl %edi, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_andn_i16:
@@ -23,7 +23,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; HASWELL-NEXT: notl %edi # sched: [1:0.25]
; HASWELL-NEXT: andw (%rdx), %di # sched: [1:0.50]
; HASWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_andn_i16:
@@ -32,7 +32,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; BROADWELL-NEXT: notl %edi # sched: [1:0.25]
; BROADWELL-NEXT: andw (%rdx), %di # sched: [6:0.50]
; BROADWELL-NEXT: addl %edi, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_andn_i16:
@@ -41,7 +41,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; SKYLAKE-NEXT: notl %edi # sched: [1:0.25]
; SKYLAKE-NEXT: andw (%rdx), %di # sched: [6:0.50]
; SKYLAKE-NEXT: addl %edi, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_andn_i16:
@@ -50,7 +50,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; BTVER2-NEXT: notl %edi # sched: [1:0.50]
; BTVER2-NEXT: andw (%rdx), %di # sched: [4:1.00]
; BTVER2-NEXT: addl %edi, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_andn_i16:
@@ -59,7 +59,7 @@ define i16 @test_andn_i16(i16 zeroext %a0, i16 zeroext %a1, i16 *%a2) {
; ZNVER1-NEXT: notl %edi # sched: [1:0.25]
; ZNVER1-NEXT: andw (%rdx), %di # sched: [5:0.50]
; ZNVER1-NEXT: addl %edi, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a2
%2 = xor i16 %a0, -1
@@ -581,7 +581,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-NEXT: tzcntw (%rsi), %cx
; GENERIC-NEXT: tzcntw %di, %ax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_cttz_i16:
@@ -589,7 +589,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; HASWELL-NEXT: tzcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_cttz_i16:
@@ -597,7 +597,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; BROADWELL-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_cttz_i16:
@@ -605,7 +605,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; SKYLAKE-NEXT: tzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: tzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_cttz_i16:
@@ -613,7 +613,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; BTVER2-NEXT: tzcntw (%rsi), %cx
; BTVER2-NEXT: tzcntw %di, %ax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_cttz_i16:
@@ -621,7 +621,7 @@ define i16 @test_cttz_i16(i16 zeroext %a0, i16 *%a1) {
; ZNVER1-NEXT: tzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: tzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.cttz.i16( i16 %1, i1 false )
diff --git a/test/CodeGen/X86/bmi.ll b/test/CodeGen/X86/bmi.ll
index 66c76131ba6..b855b89183b 100644
--- a/test/CodeGen/X86/bmi.ll
+++ b/test/CodeGen/X86/bmi.ll
@@ -13,7 +13,7 @@ define i8 @t1(i8 %x) {
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $256, %eax # imm = 0x100
; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 false )
ret i8 %tmp
@@ -61,7 +61,7 @@ define i8 @t5(i8 %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: tzcntl %eax, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%tmp = tail call i8 @llvm.cttz.i8( i8 %x, i1 true )
ret i8 %tmp
@@ -516,7 +516,7 @@ define i32 @bzhi32d(i32 %a, i32 %b) {
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movl $-1, %eax
-; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
; BMI1-NEXT: shrl %cl, %eax
; BMI1-NEXT: andl %edi, %eax
; BMI1-NEXT: retq
@@ -538,7 +538,7 @@ define i32 @bzhi32e(i32 %a, i32 %b) {
; BMI1-NEXT: movl $32, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shll %cl, %edi
-; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
; BMI1-NEXT: shrl %cl, %edi
; BMI1-NEXT: movl %edi, %eax
; BMI1-NEXT: retq
@@ -566,7 +566,7 @@ define i64 @bzhi64b(i64 %x, i8 zeroext %index) {
;
; BMI2-LABEL: bzhi64b:
; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -583,7 +583,7 @@ define i64 @bzhi64c(i64 %a, i64 %b) {
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
; BMI1-NEXT: shrq %cl, %rax
; BMI1-NEXT: andq %rdi, %rax
; BMI1-NEXT: retq
@@ -605,14 +605,14 @@ define i64 @bzhi64d(i64 %a, i32 %b) {
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: movq $-1, %rax
-; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
; BMI1-NEXT: shrq %cl, %rax
; BMI1-NEXT: andq %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64d:
; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
@@ -629,7 +629,7 @@ define i64 @bzhi64e(i64 %a, i64 %b) {
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
; BMI1-NEXT: shrq %cl, %rdi
; BMI1-NEXT: movq %rdi, %rax
; BMI1-NEXT: retq
@@ -651,14 +651,14 @@ define i64 @bzhi64f(i64 %a, i32 %b) {
; BMI1-NEXT: movl $64, %ecx
; BMI1-NEXT: subl %esi, %ecx
; BMI1-NEXT: shlq %cl, %rdi
-; BMI1-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; BMI1-NEXT: # kill: def %cl killed %cl killed %ecx
; BMI1-NEXT: shrq %cl, %rdi
; BMI1-NEXT: movq %rdi, %rax
; BMI1-NEXT: retq
;
; BMI2-LABEL: bzhi64f:
; BMI2: # %bb.0: # %entry
-; BMI2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; BMI2-NEXT: # kill: def %esi killed %esi def %rsi
; BMI2-NEXT: bzhiq %rsi, %rdi, %rax
; BMI2-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/bool-simplify.ll b/test/CodeGen/X86/bool-simplify.ll
index fbd39d27d89..87929ad3325 100644
--- a/test/CodeGen/X86/bool-simplify.ll
+++ b/test/CodeGen/X86/bool-simplify.ll
@@ -55,7 +55,7 @@ define i16 @rnd16(i16 %arg) nounwind {
; CHECK-NEXT: rdrandw %cx
; CHECK-NEXT: cmovbw %di, %ax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdrand.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
@@ -107,7 +107,7 @@ define i16 @seed16(i16 %arg) nounwind {
; CHECK-NEXT: rdseedw %cx
; CHECK-NEXT: cmovbw %di, %ax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%1 = tail call { i16, i32 } @llvm.x86.rdseed.16() nounwind
%2 = extractvalue { i16, i32 } %1, 0
diff --git a/test/CodeGen/X86/bool-vector.ll b/test/CodeGen/X86/bool-vector.ll
index 7a4af821de5..ec9e42fcece 100644
--- a/test/CodeGen/X86/bool-vector.ll
+++ b/test/CodeGen/X86/bool-vector.ll
@@ -138,10 +138,10 @@ define i32 @PR15215_good(<4 x i32> %input) {
;
; X64-LABEL: PR15215_good:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; X64-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %ecx killed %ecx def %rcx
+; X64-NEXT: # kill: def %edx killed %edx def %rdx
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: andl $1, %edi
; X64-NEXT: andl $1, %esi
; X64-NEXT: andl $1, %edx
diff --git a/test/CodeGen/X86/broadcastm-lowering.ll b/test/CodeGen/X86/broadcastm-lowering.ll
index af1a7bf33fa..8548d8b7677 100644
--- a/test/CodeGen/X86/broadcastm-lowering.ll
+++ b/test/CodeGen/X86/broadcastm-lowering.ll
@@ -106,8 +106,8 @@ entry:
define <8 x i64> @test_mm512_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm512_epi64:
; AVX512CD: # %bb.0: # %entry
-; AVX512CD-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: vpbroadcastmb2q %k0, %zmm0
; AVX512CD-NEXT: retq
@@ -140,8 +140,8 @@ entry:
define <4 x i64> @test_mm256_epi64(<8 x i32> %a, <8 x i32> %b) {
; AVX512CD-LABEL: test_mm256_epi64:
; AVX512CD: # %bb.0: # %entry
-; AVX512CD-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512CD-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512CD-NEXT: kmovw %k0, %eax
; AVX512CD-NEXT: vpxor %xmm0, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/bypass-slow-division-32.ll b/test/CodeGen/X86/bypass-slow-division-32.ll
index 6677ccfbaf8..a3a07519b3e 100644
--- a/test/CodeGen/X86/bypass-slow-division-32.ll
+++ b/test/CodeGen/X86/bypass-slow-division-32.ll
@@ -17,7 +17,7 @@ define i32 @Test_get_quotient(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB0_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
@@ -41,7 +41,7 @@ define i32 @Test_get_remainder(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB1_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %ah, %eax # NOREX
; CHECK-NEXT: retl
@@ -65,7 +65,7 @@ define i32 @Test_get_quotient_and_remainder(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: retl
; CHECK-NEXT: .LBB2_1:
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %ah, %edx # NOREX
; CHECK-NEXT: movzbl %al, %eax
@@ -103,14 +103,14 @@ define i32 @Test_use_div_and_idiv(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: jmp .LBB3_6
; CHECK-NEXT: .LBB3_1:
; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %bl
; CHECK-NEXT: movzbl %al, %esi
; CHECK-NEXT: testl $-256, %edi
; CHECK-NEXT: jne .LBB3_5
; CHECK-NEXT: .LBB3_4:
; CHECK-NEXT: movzbl %cl, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %bl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: .LBB3_6:
@@ -208,7 +208,7 @@ define i32 @Test_use_div_imm_reg(i32 %a) nounwind {
; CHECK-NEXT: .LBB8_1:
; CHECK-NEXT: movb $4, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
@@ -230,7 +230,7 @@ define i32 @Test_use_rem_imm_reg(i32 %a) nounwind {
; CHECK-NEXT: .LBB9_1:
; CHECK-NEXT: movb $4, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %ax
; CHECK-NEXT: divb %cl
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/bypass-slow-division-64.ll b/test/CodeGen/X86/bypass-slow-division-64.ll
index 33789c93bcc..cf5cd70ac4f 100644
--- a/test/CodeGen/X86/bypass-slow-division-64.ll
+++ b/test/CodeGen/X86/bypass-slow-division-64.ll
@@ -20,7 +20,7 @@ define i64 @Test_get_quotient(i64 %a, i64 %b) nounwind {
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<def>
+; CHECK-NEXT: # kill: def %eax killed %eax def %rax
; CHECK-NEXT: retq
%result = sdiv i64 %a, %b
ret i64 %result
@@ -43,7 +43,7 @@ define i64 @Test_get_remainder(i64 %a, i64 %b) nounwind {
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
+; CHECK-NEXT: # kill: def %edx killed %edx def %rdx
; CHECK-NEXT: movq %rdx, %rax
; CHECK-NEXT: retq
%result = srem i64 %a, %b
@@ -67,8 +67,8 @@ define i64 @Test_get_quotient_and_remainder(i64 %a, i64 %b) nounwind {
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: divl %esi
-; CHECK-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<def>
+; CHECK-NEXT: # kill: def %edx killed %edx def %rdx
+; CHECK-NEXT: # kill: def %eax killed %eax def %rax
; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: retq
%resultdiv = sdiv i64 %a, %b
diff --git a/test/CodeGen/X86/clz.ll b/test/CodeGen/X86/clz.ll
index 5f58e79a94e..bd63a8006e5 100644
--- a/test/CodeGen/X86/clz.ll
+++ b/test/CodeGen/X86/clz.ll
@@ -19,28 +19,28 @@ define i8 @cttz_i8(i8 %x) {
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8:
; X32-CLZ: # %bb.0:
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8:
; X64-CLZ: # %bb.0:
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X64-CLZ-NEXT: retq
%tmp = call i8 @llvm.cttz.i8( i8 %x, i1 true )
ret i8 %tmp
@@ -144,7 +144,7 @@ define i8 @ctlz_i8(i8 %x) {
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8:
@@ -152,7 +152,7 @@ define i8 @ctlz_i8(i8 %x) {
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8:
@@ -160,7 +160,7 @@ define i8 @ctlz_i8(i8 %x) {
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8:
@@ -168,7 +168,7 @@ define i8 @ctlz_i8(i8 %x) {
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X64-CLZ-NEXT: retq
%tmp2 = call i8 @llvm.ctlz.i8( i8 %x, i1 true )
ret i8 %tmp2
@@ -179,14 +179,14 @@ define i16 @ctlz_i16(i16 %x) {
; X32: # %bb.0:
; X32-NEXT: bsrw {{[0-9]+}}(%esp), %ax
; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16:
; X64: # %bb.0:
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16:
@@ -286,11 +286,11 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
; X32-NEXT: .LBB8_1:
; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_zero_test:
@@ -301,11 +301,11 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
; X64-NEXT: .LBB8_1:
; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_zero_test:
@@ -313,7 +313,7 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_zero_test:
@@ -321,7 +321,7 @@ define i8 @ctlz_i8_zero_test(i8 %n) {
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X64-CLZ-NEXT: retq
%tmp1 = call i8 @llvm.ctlz.i8(i8 %n, i1 false)
ret i8 %tmp1
@@ -337,11 +337,11 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: bsrw %ax, %ax
; X32-NEXT: xorl $15, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
; X32-NEXT: .LBB9_1:
; X32-NEXT: movw $16, %ax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i16_zero_test:
@@ -351,11 +351,11 @@ define i16 @ctlz_i16_zero_test(i16 %n) {
; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: bsrw %di, %ax
; X64-NEXT: xorl $15, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
; X64-NEXT: .LBB9_1:
; X64-NEXT: movw $16, %ax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i16_zero_test:
@@ -480,11 +480,11 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X32-NEXT: # %bb.2: # %cond.false
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
; X32-NEXT: .LBB12_1
; X32-NEXT: movb $8, %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_zero_test:
@@ -494,11 +494,11 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X64-NEXT: # %bb.2: # %cond.false
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
; X64-NEXT: .LBB12_1:
; X64-NEXT: movb $8, %al
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_zero_test:
@@ -506,7 +506,7 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X32-CLZ-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_zero_test:
@@ -514,7 +514,7 @@ define i8 @cttz_i8_zero_test(i8 %n) {
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: orl $256, %eax # imm = 0x100
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X64-CLZ-NEXT: retq
%tmp1 = call i8 @llvm.cttz.i8(i8 %n, i1 false)
ret i8 %tmp1
@@ -786,7 +786,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X32-NEXT: orb $2, %al
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsfl %eax, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: cttz_i8_knownbits:
@@ -794,7 +794,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X64-NEXT: orb $2, %dil
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsfl %eax, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: cttz_i8_knownbits:
@@ -803,7 +803,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X32-CLZ-NEXT: orb $2, %al
; X32-CLZ-NEXT: movzbl %al, %eax
; X32-CLZ-NEXT: tzcntl %eax, %eax
-; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: cttz_i8_knownbits:
@@ -811,7 +811,7 @@ define i8 @cttz_i8_knownbits(i8 %x) {
; X64-CLZ-NEXT: orb $2, %dil
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: tzcntl %eax, %eax
-; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X64-CLZ-NEXT: retq
%x2 = or i8 %x, 2
%tmp = call i8 @llvm.cttz.i8(i8 %x2, i1 true )
@@ -827,7 +827,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: bsrl %eax, %eax
; X32-NEXT: xorl $7, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: ctlz_i8_knownbits:
@@ -836,7 +836,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X64-NEXT: movzbl %dil, %eax
; X64-NEXT: bsrl %eax, %eax
; X64-NEXT: xorl $7, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-CLZ-LABEL: ctlz_i8_knownbits:
@@ -846,7 +846,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X32-CLZ-NEXT: movzbl %al, %eax
; X32-CLZ-NEXT: lzcntl %eax, %eax
; X32-CLZ-NEXT: addl $-24, %eax
-; X32-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X32-CLZ-NEXT: retl
;
; X64-CLZ-LABEL: ctlz_i8_knownbits:
@@ -855,7 +855,7 @@ define i8 @ctlz_i8_knownbits(i8 %x) {
; X64-CLZ-NEXT: movzbl %dil, %eax
; X64-CLZ-NEXT: lzcntl %eax, %eax
; X64-CLZ-NEXT: addl $-24, %eax
-; X64-CLZ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-CLZ-NEXT: # kill: def %al killed %al killed %eax
; X64-CLZ-NEXT: retq
%x2 = or i8 %x, 64
diff --git a/test/CodeGen/X86/cmov-into-branch.ll b/test/CodeGen/X86/cmov-into-branch.ll
index 4c1b2bcb162..c18a9ca7459 100644
--- a/test/CodeGen/X86/cmov-into-branch.ll
+++ b/test/CodeGen/X86/cmov-into-branch.ll
@@ -65,7 +65,7 @@ define i32 @test5(i32 %a, i32* nocapture %b, i32 %x, i32 %y) {
define void @test6(i32 %a, i32 %x, i32* %y.ptr, i64* %z.ptr) {
; CHECK-LABEL: test6:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: cmovnsl (%rdx), %esi
; CHECK-NEXT: movq %rsi, (%rcx)
diff --git a/test/CodeGen/X86/cmov-promotion.ll b/test/CodeGen/X86/cmov-promotion.ll
index 1021a5b5716..8e34b62eadb 100644
--- a/test/CodeGen/X86/cmov-promotion.ll
+++ b/test/CodeGen/X86/cmov-promotion.ll
@@ -12,7 +12,7 @@ define i16 @cmov_zpromotion_8_to_16(i1 %c) {
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB0_2:
; CMOV-NEXT: movzbl %al, %eax
-; CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CMOV-NEXT: # kill: def %ax killed %ax killed %eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_zpromotion_8_to_16:
@@ -24,7 +24,7 @@ define i16 @cmov_zpromotion_8_to_16(i1 %c) {
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB0_2:
; NO_CMOV-NEXT: movzbl %al, %eax
-; NO_CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NO_CMOV-NEXT: # kill: def %ax killed %ax killed %eax
; NO_CMOV-NEXT: retl
%t0 = select i1 %c, i8 117, i8 -19
%ret = zext i8 %t0 to i16
@@ -167,7 +167,7 @@ define i16 @cmov_spromotion_8_to_16(i1 %c) {
; CMOV-NEXT: movb $-19, %al
; CMOV-NEXT: .LBB6_2:
; CMOV-NEXT: movsbl %al, %eax
-; CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CMOV-NEXT: # kill: def %ax killed %ax killed %eax
; CMOV-NEXT: retq
;
; NO_CMOV-LABEL: cmov_spromotion_8_to_16:
@@ -179,7 +179,7 @@ define i16 @cmov_spromotion_8_to_16(i1 %c) {
; NO_CMOV-NEXT: movb $-19, %al
; NO_CMOV-NEXT: .LBB6_2:
; NO_CMOV-NEXT: movsbl %al, %eax
-; NO_CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; NO_CMOV-NEXT: # kill: def %ax killed %ax killed %eax
; NO_CMOV-NEXT: retl
%t0 = select i1 %c, i8 117, i8 -19
%ret = sext i8 %t0 to i16
diff --git a/test/CodeGen/X86/cmov.ll b/test/CodeGen/X86/cmov.ll
index 0495b74c962..e860a59806e 100644
--- a/test/CodeGen/X86/cmov.ll
+++ b/test/CodeGen/X86/cmov.ll
@@ -83,7 +83,7 @@ define i1 @test4() nounwind {
; CHECK-NEXT: shrb $7, %al
; CHECK-NEXT: movzbl %al, %ecx
; CHECK-NEXT: xorl $1, %ecx
-; CHECK-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
; CHECK-NEXT: sarl %cl, %edx
; CHECK-NEXT: movb {{.*}}(%rip), %al
; CHECK-NEXT: testb %al, %al
diff --git a/test/CodeGen/X86/cmovcmov.ll b/test/CodeGen/X86/cmovcmov.ll
index 3d0a60f1a31..98a7eb7db0f 100644
--- a/test/CodeGen/X86/cmovcmov.ll
+++ b/test/CodeGen/X86/cmovcmov.ll
@@ -227,8 +227,8 @@ attributes #0 = { nounwind }
; The following test failed because llvm had a bug where a structure like:
;
-; %12<def> = CMOV_GR8 %7, %11 ... (lt)
-; %13<def> = CMOV_GR8 %12, %11 ... (gt)
+; %12 = CMOV_GR8 %7, %11 ... (lt)
+; %13 = CMOV_GR8 %12, %11 ... (gt)
;
; was lowered to:
;
diff --git a/test/CodeGen/X86/coalescer-dce.ll b/test/CodeGen/X86/coalescer-dce.ll
index d97d11c6695..90a07720e65 100644
--- a/test/CodeGen/X86/coalescer-dce.ll
+++ b/test/CodeGen/X86/coalescer-dce.ll
@@ -4,19 +4,19 @@ target triple = "x86_64-apple-macosx10.7.0"
; This test case has a sub-register join followed by a remat:
;
-; 256L %2<def> = COPY %7:sub_32bit<kill>; GR32:%2 GR64:%7
+; 256L %2 = COPY killed %7:sub_32bit; GR32:%2 GR64:%7
; Considering merging %2 with %7:sub_32bit
; Cross-class to GR64.
; RHS = %2 = [256d,272d:0) 0@256d
; LHS = %7 = [208d,256d:0)[304L,480L:0) 0@208d
-; updated: 272L %0<def> = COPY %7:sub_32bit<kill>; GR32:%0 GR64:%7
+; updated: 272L %0 = COPY killed %7:sub_32bit; GR32:%0 GR64:%7
; Joined. Result = %7 = [208d,272d:0)[304L,480L:0) 0@208d
;
-; 272L %10:sub_32bit<def> = COPY %7:sub_32bit<kill>, %10<imp-def>; GR64:%10,%7
+; 272L %10:sub_32bit = COPY killed %7:sub_32bit, implicit-def %10; GR64:%10,%7
; Considering merging %7 with %10
; RHS = %7 = [208d,272d:0)[304L,480L:0) 0@208d
; LHS = %10 = [16d,64L:2)[64L,160L:1)[192L,240L:1)[272d,304L:3)[304L,352d:1)[352d,400d:0)[400d,400S:4) 0@352d 1@64L-phidef 2@16d-phikill 3@272d-phikill 4@400d
-; Remat: %10<def> = MOV64r0 %10<imp-def>, %eflags<imp-def,dead>, %10<imp-def>; GR64:%10
+; Remat: %10 = MOV64r0 implicit-def %10, implicit dead %eflags, implicit-def %10; GR64:%10
; Shrink: %7 = [208d,272d:0)[304L,480L:0) 0@208d
; live-in at 240L
; live-in at 416L
diff --git a/test/CodeGen/X86/combine-abs.ll b/test/CodeGen/X86/combine-abs.ll
index 3ecec6ecfa0..dd867538092 100644
--- a/test/CodeGen/X86/combine-abs.ll
+++ b/test/CodeGen/X86/combine-abs.ll
@@ -77,9 +77,9 @@ define <4 x i64> @combine_v4i64_abs_abs(<4 x i64> %a) {
;
; AVX512F-LABEL: combine_v4i64_abs_abs:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpabsq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: combine_v4i64_abs_abs:
diff --git a/test/CodeGen/X86/compress_expand.ll b/test/CodeGen/X86/compress_expand.ll
index 4e90eccfc62..14b41094109 100644
--- a/test/CodeGen/X86/compress_expand.ll
+++ b/test/CodeGen/X86/compress_expand.ll
@@ -72,11 +72,11 @@ define <4 x float> @test4(float* %base, <4 x float> %src0) {
;
; KNL-LABEL: test4:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: movw $7, %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: retq
%res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
ret <4 x float>%res
@@ -92,11 +92,11 @@ define <2 x i64> @test5(i64* %base, <2 x i64> %src0) {
;
; KNL-LABEL: test5:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: movb $2, %al
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: retq
%res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
ret <2 x i64>%res
@@ -137,7 +137,7 @@ define void @test7(float* %base, <8 x float> %V, <8 x i1> %mask) {
;
; KNL-LABEL: test7:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpmovsxwq %xmm1, %zmm1
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
@@ -198,7 +198,7 @@ define void @test10(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
;
; KNL-LABEL: test10:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -221,7 +221,7 @@ define void @test11(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
;
; KNL-LABEL: test11:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL-NEXT: vpsraq $63, %zmm1, %zmm1
; KNL-NEXT: vmovdqa %xmm1, %xmm1
@@ -243,7 +243,7 @@ define void @test12(float* %base, <4 x float> %V, <4 x i1> %mask) {
;
; KNL-LABEL: test12:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL-NEXT: vmovdqa %xmm1, %xmm1
@@ -266,7 +266,7 @@ define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger)
;
; KNL-LABEL: test13:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
@@ -275,7 +275,7 @@ define <2 x float> @test13(float* %base, <2 x float> %src0, <2 x i32> %trigger)
; KNL-NEXT: vpslld $31, %zmm1, %zmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
%res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
@@ -293,7 +293,7 @@ define void @test14(float* %base, <2 x float> %V, <2 x i32> %trigger) {
;
; KNL-LABEL: test14:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vpcmpeqq %xmm2, %xmm1, %xmm1
diff --git a/test/CodeGen/X86/critical-edge-split-2.ll b/test/CodeGen/X86/critical-edge-split-2.ll
index 39c713ef71b..4ebfddf0316 100644
--- a/test/CodeGen/X86/critical-edge-split-2.ll
+++ b/test/CodeGen/X86/critical-edge-split-2.ll
@@ -25,7 +25,7 @@ define i16 @test1(i1 zeroext %C, i8** nocapture %argv) nounwind ssp {
; CHECK-NEXT: divl %esi
; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: .LBB0_2: # %cond.end.i
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
entry:
br i1 %C, label %cond.end.i, label %cond.false.i
diff --git a/test/CodeGen/X86/ctpop-combine.ll b/test/CodeGen/X86/ctpop-combine.ll
index d09f46c164e..40dc6c46487 100644
--- a/test/CodeGen/X86/ctpop-combine.ll
+++ b/test/CodeGen/X86/ctpop-combine.ll
@@ -55,7 +55,7 @@ define i8 @test4(i8 %x) nounwind readnone {
; CHECK: # %bb.0:
; CHECK-NEXT: andl $127, %edi
; CHECK-NEXT: popcntl %edi, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
%x2 = and i8 %x, 127
%count = tail call i8 @llvm.ctpop.i8(i8 %x2)
diff --git a/test/CodeGen/X86/dagcombine-cse.ll b/test/CodeGen/X86/dagcombine-cse.ll
index 043dbcabcbf..544407e184a 100644
--- a/test/CodeGen/X86/dagcombine-cse.ll
+++ b/test/CodeGen/X86/dagcombine-cse.ll
@@ -19,8 +19,8 @@ define i32 @t(i8* %ref_frame_ptr, i32 %ref_frame_stride, i32 %idxX, i32 %idxY) n
;
; X64-LABEL: t:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: ## kill: %edx<def> %edx<kill> %rdx<def>
-; X64-NEXT: ## kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: ## kill: def %edx killed %edx def %rdx
+; X64-NEXT: ## kill: def %esi killed %esi def %rsi
; X64-NEXT: imull %ecx, %esi
; X64-NEXT: leal (%rsi,%rdx), %eax
; X64-NEXT: cltq
diff --git a/test/CodeGen/X86/divide-by-constant.ll b/test/CodeGen/X86/divide-by-constant.ll
index 3121f6b2cb0..f1322dd6145 100644
--- a/test/CodeGen/X86/divide-by-constant.ll
+++ b/test/CodeGen/X86/divide-by-constant.ll
@@ -8,14 +8,14 @@ define zeroext i16 @test1(i16 zeroext %x) nounwind {
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $63551, %eax, %eax # imm = 0xF83F
; X32-NEXT: shrl $21, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0: # %entry
; X64-NEXT: imull $63551, %edi, %eax # imm = 0xF83F
; X64-NEXT: shrl $21, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
entry:
%div = udiv i16 %x, 33
@@ -28,14 +28,14 @@ define zeroext i16 @test2(i8 signext %x, i16 zeroext %c) nounwind readnone ssp n
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: imull $43691, %eax, %eax # imm = 0xAAAB
; X32-NEXT: shrl $17, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
; X64-NEXT: imull $43691, %esi, %eax # imm = 0xAAAB
; X64-NEXT: shrl $17, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
entry:
%div = udiv i16 %c, 3
@@ -50,7 +50,7 @@ define zeroext i8 @test3(i8 zeroext %x, i8 zeroext %c) nounwind readnone ssp nor
; X32-NEXT: imull $171, %eax, %eax
; X32-NEXT: shrl $9, %eax
; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test3:
@@ -58,7 +58,7 @@ define zeroext i8 @test3(i8 zeroext %x, i8 zeroext %c) nounwind readnone ssp nor
; X64-NEXT: imull $171, %esi, %eax
; X64-NEXT: shrl $9, %eax
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
entry:
%div = udiv i8 %c, 3
@@ -74,7 +74,7 @@ define signext i16 @test4(i16 signext %x) nounwind {
; X32-NEXT: shrl $31, %ecx
; X32-NEXT: shrl $16, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test4:
@@ -84,7 +84,7 @@ define signext i16 @test4(i16 signext %x) nounwind {
; X64-NEXT: shrl $31, %ecx
; X64-NEXT: shrl $16, %eax
; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
entry:
%div = sdiv i16 %x, 33 ; <i32> [#uses=1]
@@ -105,7 +105,7 @@ define i32 @test5(i32 %A) nounwind {
; X64-NEXT: movl %edi, %eax
; X64-NEXT: imulq $365384439, %rax, %rax # imm = 0x15C752F7
; X64-NEXT: shrq $59, %rax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-NEXT: # kill: def %eax killed %eax killed %rax
; X64-NEXT: retq
%tmp1 = udiv i32 %A, 1577682821 ; <i32> [#uses=1]
ret i32 %tmp1
@@ -120,7 +120,7 @@ define signext i16 @test6(i16 signext %x) nounwind {
; X32-NEXT: shrl $31, %ecx
; X32-NEXT: sarl $18, %eax
; X32-NEXT: addl %ecx, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test6:
@@ -130,7 +130,7 @@ define signext i16 @test6(i16 signext %x) nounwind {
; X64-NEXT: shrl $31, %ecx
; X64-NEXT: sarl $18, %eax
; X64-NEXT: addl %ecx, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
entry:
%div = sdiv i16 %x, 10
@@ -149,11 +149,11 @@ define i32 @test7(i32 %x) nounwind {
;
; X64-LABEL: test7:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: shrl $2, %edi
; X64-NEXT: imulq $613566757, %rdi, %rax # imm = 0x24924925
; X64-NEXT: shrq $32, %rax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-NEXT: # kill: def %eax killed %eax killed %rax
; X64-NEXT: retq
%div = udiv i32 %x, 28
ret i32 %div
@@ -169,7 +169,7 @@ define i8 @test8(i8 %x) nounwind {
; X32-NEXT: imull $211, %eax, %eax
; X32-NEXT: shrl $13, %eax
; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test8:
@@ -179,7 +179,7 @@ define i8 @test8(i8 %x) nounwind {
; X64-NEXT: imull $211, %eax, %eax
; X64-NEXT: shrl $13, %eax
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%div = udiv i8 %x, 78
ret i8 %div
@@ -194,7 +194,7 @@ define i8 @test9(i8 %x) nounwind {
; X32-NEXT: imull $71, %eax, %eax
; X32-NEXT: shrl $11, %eax
; X32-NEXT: movzwl %ax, %eax
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test9:
@@ -204,7 +204,7 @@ define i8 @test9(i8 %x) nounwind {
; X64-NEXT: imull $71, %eax, %eax
; X64-NEXT: shrl $11, %eax
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%div = udiv i8 %x, 116
ret i8 %div
diff --git a/test/CodeGen/X86/divrem.ll b/test/CodeGen/X86/divrem.ll
index a43d3e51745..6648d34aa0f 100644
--- a/test/CodeGen/X86/divrem.ll
+++ b/test/CodeGen/X86/divrem.ll
@@ -262,7 +262,7 @@ define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ebx # NOREX
; X32-NEXT: movb %al, (%edx)
@@ -273,7 +273,7 @@ define void @ui8(i8 %x, i8 %y, i8* %p, i8* %q) nounwind {
; X64-LABEL: ui8:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %esi # NOREX
; X64-NEXT: movb %al, (%rdx)
diff --git a/test/CodeGen/X86/divrem8_ext.ll b/test/CodeGen/X86/divrem8_ext.ll
index eaa22c1a77c..8b6590141e1 100644
--- a/test/CodeGen/X86/divrem8_ext.ll
+++ b/test/CodeGen/X86/divrem8_ext.ll
@@ -6,7 +6,7 @@ define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_udivrem_zext_ah:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ecx # NOREX
; X32-NEXT: movb %al, z
@@ -16,7 +16,7 @@ define zeroext i8 @test_udivrem_zext_ah(i8 %x, i8 %y) {
; X64-LABEL: test_udivrem_zext_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %ecx # NOREX
; X64-NEXT: movb %al, {{.*}}(%rip)
@@ -32,19 +32,19 @@ define zeroext i8 @test_urem_zext_ah(i8 %x, i8 %y) {
; X32-LABEL: test_urem_zext_ah:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %eax # NOREX
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test_urem_zext_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%1 = urem i8 %x, %y
ret i8 %1
@@ -55,21 +55,21 @@ define i8 @test_urem_noext_ah(i8 %x, i8 %y) {
; X32: # %bb.0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %cl
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb %cl
; X32-NEXT: movzbl %ah, %eax # NOREX
; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test_urem_noext_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%1 = urem i8 %x, %y
%2 = add i8 %1, %y
@@ -80,7 +80,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
; X32-LABEL: test_urem_zext64_ah:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %eax # NOREX
; X32-NEXT: xorl %edx, %edx
@@ -89,7 +89,7 @@ define i64 @test_urem_zext64_ah(i8 %x, i8 %y) {
; X64-LABEL: test_urem_zext64_ah:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
; X64-NEXT: retq
@@ -131,7 +131,7 @@ define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
; X32-NEXT: cbtw
; X32-NEXT: idivb {{[0-9]+}}(%esp)
; X32-NEXT: movsbl %ah, %eax # NOREX
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test_srem_sext_ah:
@@ -140,7 +140,7 @@ define signext i8 @test_srem_sext_ah(i8 %x, i8 %y) {
; X64-NEXT: cbtw
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %eax # NOREX
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%1 = srem i8 %x, %y
ret i8 %1
@@ -155,7 +155,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
; X32-NEXT: idivb %cl
; X32-NEXT: movsbl %ah, %eax # NOREX
; X32-NEXT: addb %cl, %al
-; X32-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-NEXT: # kill: def %al killed %al killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test_srem_noext_ah:
@@ -165,7 +165,7 @@ define i8 @test_srem_noext_ah(i8 %x, i8 %y) {
; X64-NEXT: idivb %sil
; X64-NEXT: movsbl %ah, %eax # NOREX
; X64-NEXT: addb %sil, %al
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%1 = srem i8 %x, %y
%2 = add i8 %1, %y
@@ -200,7 +200,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
; X32-LABEL: pr25754:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X32-NEXT: # kill: def %eax killed %eax def %ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %ah, %ecx # NOREX
; X32-NEXT: movzbl %al, %eax
@@ -211,7 +211,7 @@ define i64 @pr25754(i8 %a, i8 %c) {
; X64-LABEL: pr25754:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %ecx # NOREX
; X64-NEXT: movzbl %al, %eax
diff --git a/test/CodeGen/X86/extractelement-index.ll b/test/CodeGen/X86/extractelement-index.ll
index 3a8e3b356b0..4d24a15fe2e 100644
--- a/test/CodeGen/X86/extractelement-index.ll
+++ b/test/CodeGen/X86/extractelement-index.ll
@@ -13,19 +13,19 @@ define i8 @extractelement_v16i8_1(<16 x i8> %a) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-NEXT: # kill: def %al killed %al killed %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_1:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE41-NEXT: # kill: def %al killed %al killed %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_1:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX-NEXT: # kill: def %al killed %al killed %eax
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 1
ret i8 %b
@@ -36,19 +36,19 @@ define i8 @extractelement_v16i8_11(<16 x i8> %a) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $5, %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-NEXT: # kill: def %al killed %al killed %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_11:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $11, %xmm0, %eax
-; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE41-NEXT: # kill: def %al killed %al killed %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_11:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX-NEXT: # kill: def %al killed %al killed %eax
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 11
ret i8 %b
@@ -58,19 +58,19 @@ define i8 @extractelement_v16i8_14(<16 x i8> %a) nounwind {
; SSE2-LABEL: extractelement_v16i8_14:
; SSE2: # %bb.0:
; SSE2-NEXT: pextrw $7, %xmm0, %eax
-; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-NEXT: # kill: def %al killed %al killed %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v16i8_14:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $14, %xmm0, %eax
-; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE41-NEXT: # kill: def %al killed %al killed %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v16i8_14:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX-NEXT: # kill: def %al killed %al killed %eax
; AVX-NEXT: retq
%b = extractelement <16 x i8> %a, i256 14
ret i8 %b
@@ -81,19 +81,19 @@ define i8 @extractelement_v32i8_1(<32 x i8> %a) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-NEXT: # kill: def %al killed %al killed %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_1:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm0, %eax
-; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE41-NEXT: # kill: def %al killed %al killed %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: extractelement_v32i8_1:
; AVX: # %bb.0:
; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX-NEXT: # kill: def %al killed %al killed %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%b = extractelement <32 x i8> %a, i256 1
@@ -105,20 +105,20 @@ define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: shrl $8, %eax
-; SSE2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE2-NEXT: # kill: def %al killed %al killed %eax
; SSE2-NEXT: retq
;
; SSE41-LABEL: extractelement_v32i8_17:
; SSE41: # %bb.0:
; SSE41-NEXT: pextrb $1, %xmm1, %eax
-; SSE41-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE41-NEXT: # kill: def %al killed %al killed %eax
; SSE41-NEXT: retq
;
; AVX1-LABEL: extractelement_v32i8_17:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrb $1, %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -126,7 +126,7 @@ define i8 @extractelement_v32i8_17(<32 x i8> %a) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrb $1, %xmm0, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%b = extractelement <32 x i8> %a, i256 17
@@ -137,13 +137,13 @@ define i16 @extractelement_v8i16_0(<8 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v8i16_0:
; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_0:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-NEXT: # kill: def %ax killed %ax killed %eax
; AVX-NEXT: retq
%b = extractelement <8 x i16> %a, i256 0
ret i16 %b
@@ -153,13 +153,13 @@ define i16 @extractelement_v8i16_3(<8 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v8i16_3:
; SSE: # %bb.0:
; SSE-NEXT: pextrw $3, %xmm0, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v8i16_3:
; AVX: # %bb.0:
; AVX-NEXT: vpextrw $3, %xmm0, %eax
-; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-NEXT: # kill: def %ax killed %ax killed %eax
; AVX-NEXT: retq
%b = extractelement <8 x i16> %a, i256 3
ret i16 %b
@@ -169,13 +169,13 @@ define i16 @extractelement_v16i16_0(<16 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i16_0:
; SSE: # %bb.0:
; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: extractelement_v16i16_0:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-NEXT: # kill: def %ax killed %ax killed %eax
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%b = extractelement <16 x i16> %a, i256 0
@@ -186,14 +186,14 @@ define i16 @extractelement_v16i16_13(<16 x i16> %a, i256 %i) nounwind {
; SSE-LABEL: extractelement_v16i16_13:
; SSE: # %bb.0:
; SSE-NEXT: pextrw $5, %xmm1, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: extractelement_v16i16_13:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -201,7 +201,7 @@ define i16 @extractelement_v16i16_13(<16 x i16> %a, i256 %i) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpextrw $5, %xmm0, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%b = extractelement <16 x i16> %a, i256 13
diff --git a/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll b/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
index 3c4fc8c9baa..47cd1ba95bc 100644
--- a/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/f16c-intrinsics-fast-isel.ll
@@ -43,7 +43,7 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
; X32-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X32-NEXT: vmovd %xmm0, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test_cvtss_sh:
@@ -52,7 +52,7 @@ define i16 @test_cvtss_sh(float %a0) nounwind {
; X64-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT: vcvtps2ph $0, %xmm0, %xmm0
; X64-NEXT: vmovd %xmm0, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%ins0 = insertelement <4 x float> undef, float %a0, i32 0
%ins1 = insertelement <4 x float> %ins0, float 0.000000e+00, i32 1
diff --git a/test/CodeGen/X86/fast-isel-cmp.ll b/test/CodeGen/X86/fast-isel-cmp.ll
index 991ecfd5e99..355e6eb1b1e 100644
--- a/test/CodeGen/X86/fast-isel-cmp.ll
+++ b/test/CodeGen/X86/fast-isel-cmp.ll
@@ -10,7 +10,7 @@ define zeroext i1 @fcmp_oeq(float %x, float %y) {
; SDAG-NEXT: cmpeqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SDAG-NEXT: ## kill: def %al killed %al killed %eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq:
@@ -354,7 +354,7 @@ define zeroext i1 @fcmp_une(float %x, float %y) {
; SDAG-NEXT: cmpneqss %xmm1, %xmm0
; SDAG-NEXT: movd %xmm0, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SDAG-NEXT: ## kill: def %al killed %al killed %eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une:
@@ -594,7 +594,7 @@ define zeroext i1 @fcmp_oeq3(float %x) {
; SDAG-NEXT: cmpeqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SDAG-NEXT: ## kill: def %al killed %al killed %eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_oeq3:
@@ -1249,7 +1249,7 @@ define zeroext i1 @fcmp_une3(float %x) {
; SDAG-NEXT: cmpneqss %xmm0, %xmm1
; SDAG-NEXT: movd %xmm1, %eax
; SDAG-NEXT: andl $1, %eax
-; SDAG-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; SDAG-NEXT: ## kill: def %al killed %al killed %eax
; SDAG-NEXT: retq
;
; FAST_NOAVX-LABEL: fcmp_une3:
diff --git a/test/CodeGen/X86/fast-isel-sext-zext.ll b/test/CodeGen/X86/fast-isel-sext-zext.ll
index a99a46dc40f..92344a5c1a3 100644
--- a/test/CodeGen/X86/fast-isel-sext-zext.ll
+++ b/test/CodeGen/X86/fast-isel-sext-zext.ll
@@ -30,7 +30,7 @@ define i16 @test2(i16 %x) nounwind {
; X32-NEXT: andb $1, %al
; X32-NEXT: negb %al
; X32-NEXT: movsbl %al, %eax
-; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: ## kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
@@ -39,7 +39,7 @@ define i16 @test2(i16 %x) nounwind {
; X64-NEXT: andb $1, %dil
; X64-NEXT: negb %dil
; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%z = trunc i16 %x to i1
@@ -116,7 +116,7 @@ define i16 @test6(i16 %x) nounwind {
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
; X32-NEXT: movzbl %al, %eax
-; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: ## kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
@@ -124,7 +124,7 @@ define i16 @test6(i16 %x) nounwind {
; X64: ## %bb.0:
; X64-NEXT: andb $1, %dil
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%z = trunc i16 %x to i1
@@ -176,14 +176,14 @@ define i16 @test9(i8 %x) nounwind {
; X32-LABEL: test9:
; X32: ## %bb.0:
; X32-NEXT: movsbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: ## kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test9:
; X64: ## %bb.0:
; X64-NEXT: movsbl %dil, %eax
-; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%u = sext i8 %x to i16
@@ -228,14 +228,14 @@ define i16 @test12(i8 %x) nounwind {
; X32-LABEL: test12:
; X32: ## %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: ## kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
; X32-NEXT: ## -- End function
;
; X64-LABEL: test12:
; X64: ## %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
; X64-NEXT: ## -- End function
%u = zext i8 %x to i16
diff --git a/test/CodeGen/X86/fast-isel-shift.ll b/test/CodeGen/X86/fast-isel-shift.ll
index ff6858c2b79..5d416e18260 100644
--- a/test/CodeGen/X86/fast-isel-shift.ll
+++ b/test/CodeGen/X86/fast-isel-shift.ll
@@ -16,7 +16,7 @@ define i16 @shl_i16(i16 %a, i16 %b) {
; CHECK-LABEL: shl_i16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %cx
; CHECK-NEXT: shlw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -28,7 +28,7 @@ define i32 @shl_i32(i32 %a, i32 %b) {
; CHECK-LABEL: shl_i32:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %ecx
; CHECK-NEXT: shll %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -40,7 +40,7 @@ define i64 @shl_i64(i64 %a, i64 %b) {
; CHECK-LABEL: shl_i64:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %rcx
; CHECK-NEXT: shlq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -63,7 +63,7 @@ define i16 @lshr_i16(i16 %a, i16 %b) {
; CHECK-LABEL: lshr_i16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %cx
; CHECK-NEXT: shrw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -75,7 +75,7 @@ define i32 @lshr_i32(i32 %a, i32 %b) {
; CHECK-LABEL: lshr_i32:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %ecx
; CHECK-NEXT: shrl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -87,7 +87,7 @@ define i64 @lshr_i64(i64 %a, i64 %b) {
; CHECK-LABEL: lshr_i64:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %rcx
; CHECK-NEXT: shrq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -110,7 +110,7 @@ define i16 @ashr_i16(i16 %a, i16 %b) {
; CHECK-LABEL: ashr_i16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %cl<def> %cx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %cx
; CHECK-NEXT: sarw %cl, %di
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -122,7 +122,7 @@ define i32 @ashr_i32(i32 %a, i32 %b) {
; CHECK-LABEL: ashr_i32:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
-; CHECK-NEXT: ## kill: %cl<def> %ecx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %ecx
; CHECK-NEXT: sarl %cl, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: retq
@@ -134,7 +134,7 @@ define i64 @ashr_i64(i64 %a, i64 %b) {
; CHECK-LABEL: ashr_i64:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rsi, %rcx
-; CHECK-NEXT: ## kill: %cl<def> %rcx<kill>
+; CHECK-NEXT: ## kill: def %cl killed %rcx
; CHECK-NEXT: sarq %cl, %rdi
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: retq
@@ -155,9 +155,9 @@ define i8 @shl_imm1_i8(i8 %a) {
define i16 @shl_imm1_i16(i16 %a) {
; CHECK-LABEL: shl_imm1_i16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal (,%rdi,2), %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%c = shl i16 %a, 1
ret i16 %c
@@ -166,7 +166,7 @@ define i16 @shl_imm1_i16(i16 %a) {
define i32 @shl_imm1_i32(i32 %a) {
; CHECK-LABEL: shl_imm1_i32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal (,%rdi,2), %eax
; CHECK-NEXT: retq
%c = shl i32 %a, 1
diff --git a/test/CodeGen/X86/fixup-bw-copy.ll b/test/CodeGen/X86/fixup-bw-copy.ll
index 443fcf3f504..dead278bb0d 100644
--- a/test/CodeGen/X86/fixup-bw-copy.ll
+++ b/test/CodeGen/X86/fixup-bw-copy.ll
@@ -54,7 +54,7 @@ define i8 @test_movb_hreg(i16 %a0) {
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl $8, %eax
; X64-NEXT: addb %dil, %al
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
;
; X32-LABEL: test_movb_hreg:
diff --git a/test/CodeGen/X86/fixup-bw-inst.mir b/test/CodeGen/X86/fixup-bw-inst.mir
index 6638934f714..cea483e1b9b 100644
--- a/test/CodeGen/X86/fixup-bw-inst.mir
+++ b/test/CodeGen/X86/fixup-bw-inst.mir
@@ -10,9 +10,9 @@
;
; %0 is used in %if.end BB (before tail-duplication), so its
; corresponding super-register (EAX) is live-in into that BB (%if.end)
- ; and also has an EAX<imp-def> flag. Make sure that we still change
+ ; and also has an implicit-def EAX flag. Make sure that we still change
; the movw into movzwl because EAX is not live before the load (which
- ; can be seen by the fact that EAX<imp-use> flag is missing).
+ ; can be seen by the fact that implicit EAX flag is missing).
entry:
%tobool = icmp eq i16* %p, null
br i1 %tobool, label %if.end, label %if.then
diff --git a/test/CodeGen/X86/gpr-to-mask.ll b/test/CodeGen/X86/gpr-to-mask.ll
index f558541416a..ead07adb387 100644
--- a/test/CodeGen/X86/gpr-to-mask.ll
+++ b/test/CodeGen/X86/gpr-to-mask.ll
@@ -260,8 +260,8 @@ exit:
define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shl1:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB5_2
; X86-64-NEXT: # %bb.1: # %if
@@ -278,8 +278,8 @@ define void @test_shl1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x
;
; X86-32-LABEL: test_shl1:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB5_2
@@ -319,8 +319,8 @@ exit:
define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shr1:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB6_2
; X86-64-NEXT: # %bb.1: # %if
@@ -338,8 +338,8 @@ define void @test_shr1(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x
;
; X86-32-LABEL: test_shr1:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB6_2
@@ -380,8 +380,8 @@ exit:
define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shr2:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB7_2
; X86-64-NEXT: # %bb.1: # %if
@@ -398,8 +398,8 @@ define void @test_shr2(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x
;
; X86-32-LABEL: test_shr2:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB7_2
@@ -439,8 +439,8 @@ exit:
define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_shl:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-64-NEXT: testb $1, %dil
; X86-64-NEXT: je .LBB8_2
; X86-64-NEXT: # %bb.1: # %if
@@ -457,8 +457,8 @@ define void @test_shl(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x f
;
; X86-32-LABEL: test_shl:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-32-NEXT: je .LBB8_2
@@ -498,8 +498,8 @@ exit:
define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x float> %fvec2, <8 x float>* %fptrvec) {
; X86-64-LABEL: test_add:
; X86-64: # %bb.0: # %entry
-; X86-64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-64-NEXT: kmovb (%rsi), %k0
; X86-64-NEXT: kmovb (%rdx), %k1
; X86-64-NEXT: testb $1, %dil
@@ -517,8 +517,8 @@ define void @test_add(i1 %cond, i8* %ptr1, i8* %ptr2, <8 x float> %fvec1, <8 x f
;
; X86-32-LABEL: test_add:
; X86-32: # %bb.0: # %entry
-; X86-32-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; X86-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X86-32-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; X86-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-32-NEXT: movl {{[0-9]+}}(%esp), %edx
diff --git a/test/CodeGen/X86/half.ll b/test/CodeGen/X86/half.ll
index d36a04b8459..20db4a5e388 100644
--- a/test/CodeGen/X86/half.ll
+++ b/test/CodeGen/X86/half.ll
@@ -777,7 +777,7 @@ define void @test_trunc64_vec4(<4 x double> %a, <4 x half>* %p) #0 {
; BWON-F16C-NEXT: callq __truncdfhf2
; BWON-F16C-NEXT: movl %eax, %r15d
; BWON-F16C-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; BWON-F16C-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; BWON-F16C-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; BWON-F16C-NEXT: vzeroupper
; BWON-F16C-NEXT: callq __truncdfhf2
; BWON-F16C-NEXT: movl %eax, %ebp
diff --git a/test/CodeGen/X86/handle-move.ll b/test/CodeGen/X86/handle-move.ll
index a152f6db54e..0a43ef3fc22 100644
--- a/test/CodeGen/X86/handle-move.ll
+++ b/test/CodeGen/X86/handle-move.ll
@@ -8,7 +8,7 @@
; %edx has a live range into the function and is used by the DIV32r.
;
; Here sinking a kill + dead def:
-; 144B -> 180B: DIV32r %4, %eax<imp-def>, %edx<imp-def,dead>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+; 144B -> 180B: DIV32r %4, implicit-def %eax, implicit dead %edx, implicit dead %EFLAGS, implicit killed %eax, implicit %edx
; %4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,144d:1) 0@0B-phi 1@144r 2@128r
@@ -25,7 +25,7 @@ entry:
}
; Same as above, but moving a kill + live def:
-; 144B -> 180B: DIV32r %4, %eax<imp-def,dead>, %edx<imp-def>, %EFLAGS<imp-def,dead>, %eax<imp-use,kill>, %edx<imp-use>
+; 144B -> 180B: DIV32r %4, implicit dead %eax, implicit-def %edx, implicit dead %EFLAGS, implicit killed %eax, implicit %edx
; %4: [48r,144r:0) 0@48r
; --> [48r,180r:0) 0@48r
; DH: [0B,16r:0)[128r,144r:2)[144r,184r:1) 0@0B-phi 1@144r 2@128r
@@ -59,7 +59,7 @@ entry:
}
; Move EFLAGS dead def across another def:
-; handleMove 208B -> 36B: %edx<def> = MOV32r0 %EFLAGS<imp-def,dead>
+; handleMove 208B -> 36B: %edx = MOV32r0 implicit dead %EFLAGS
; EFLAGS: [20r,20d:4)[160r,160d:3)[208r,208d:0)[224r,224d:1)[272r,272d:2)[304r,304d:5) 0@208r 1@224r 2@272r 3@160r 4@20r 5@304r
; --> [20r,20d:4)[36r,36d:0)[160r,160d:3)[224r,224d:1)[272r,272d:2)[304r,304d:5) 0@36r 1@224r 2@272r 3@160r 4@20r 5@304r
;
diff --git a/test/CodeGen/X86/horizontal-reduce-smax.ll b/test/CodeGen/X86/horizontal-reduce-smax.ll
index 9e53aea03e9..a54e01d9af6 100644
--- a/test/CodeGen/X86/horizontal-reduce-smax.ll
+++ b/test/CodeGen/X86/horizontal-reduce-smax.ll
@@ -206,7 +206,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -216,7 +216,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
@@ -226,7 +226,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -239,7 +239,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -249,7 +249,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
@@ -259,7 +259,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp sgt <8 x i16> %a0, %1
@@ -304,7 +304,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -320,7 +320,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -334,7 +334,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -366,7 +366,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -382,7 +382,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -396,7 +396,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp sgt <16 x i8> %a0, %1
@@ -746,7 +746,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -757,7 +757,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -769,7 +769,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -782,7 +782,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -797,7 +797,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -808,7 +808,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -820,7 +820,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -833,7 +833,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -846,7 +846,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -900,7 +900,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -917,7 +917,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -933,7 +933,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -950,7 +950,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -988,7 +988,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -1005,7 +1005,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1021,7 +1021,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1038,7 +1038,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1055,7 +1055,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1552,7 +1552,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1565,7 +1565,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1580,7 +1580,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1594,7 +1594,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1611,7 +1611,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pmaxsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1624,7 +1624,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1639,7 +1639,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1653,7 +1653,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1668,7 +1668,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1735,7 +1735,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1754,7 +1754,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1773,7 +1773,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1791,7 +1791,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1839,7 +1839,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -1858,7 +1858,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -1877,7 +1877,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1895,7 +1895,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1914,7 +1914,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxsb %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/horizontal-reduce-smin.ll b/test/CodeGen/X86/horizontal-reduce-smin.ll
index e92dcc1072e..f03e745598e 100644
--- a/test/CodeGen/X86/horizontal-reduce-smin.ll
+++ b/test/CodeGen/X86/horizontal-reduce-smin.ll
@@ -208,7 +208,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -218,7 +218,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
@@ -228,7 +228,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -241,7 +241,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -251,7 +251,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
@@ -261,7 +261,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp slt <8 x i16> %a0, %1
@@ -306,7 +306,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -322,7 +322,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -336,7 +336,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -368,7 +368,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -384,7 +384,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -398,7 +398,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp slt <16 x i8> %a0, %1
@@ -750,7 +750,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -761,7 +761,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -773,7 +773,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -786,7 +786,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -801,7 +801,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -812,7 +812,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -824,7 +824,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -837,7 +837,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -850,7 +850,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -904,7 +904,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -921,7 +921,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -937,7 +937,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -954,7 +954,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -992,7 +992,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -1009,7 +1009,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1025,7 +1025,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1042,7 +1042,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1059,7 +1059,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1554,7 +1554,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: psrld $16, %xmm1
; X86-SSE2-NEXT: pminsw %xmm0, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1567,7 +1567,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1582,7 +1582,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1596,7 +1596,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1613,7 +1613,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: psrld $16, %xmm1
; X64-SSE2-NEXT: pminsw %xmm0, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1626,7 +1626,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1641,7 +1641,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1655,7 +1655,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1670,7 +1670,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1737,7 +1737,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1756,7 +1756,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminsb %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1775,7 +1775,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1793,7 +1793,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1841,7 +1841,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm2, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -1860,7 +1860,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminsb %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -1879,7 +1879,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminsb %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1897,7 +1897,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminsb %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1916,7 +1916,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminsb %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/horizontal-reduce-umax.ll b/test/CodeGen/X86/horizontal-reduce-umax.ll
index 84020e1bd66..52e623b8271 100644
--- a/test/CodeGen/X86/horizontal-reduce-umax.ll
+++ b/test/CodeGen/X86/horizontal-reduce-umax.ll
@@ -254,7 +254,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm3
; X86-SSE2-NEXT: por %xmm2, %xmm3
; X86-SSE2-NEXT: movd %xmm3, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
@@ -264,7 +264,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
@@ -274,7 +274,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -308,7 +308,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm3
; X64-SSE2-NEXT: por %xmm2, %xmm3
; X64-SSE2-NEXT: movd %xmm3, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
@@ -318,7 +318,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
@@ -328,7 +328,7 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ugt <8 x i16> %a0, %1
@@ -357,7 +357,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -373,7 +373,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -387,7 +387,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -403,7 +403,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -419,7 +419,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -433,7 +433,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ugt <16 x i8> %a0, %1
@@ -863,7 +863,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm3, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -874,7 +874,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -886,7 +886,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -899,7 +899,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -942,7 +942,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm3, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -953,7 +953,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -965,7 +965,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -978,7 +978,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -991,7 +991,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1025,7 +1025,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -1042,7 +1042,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -1058,7 +1058,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1075,7 +1075,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1093,7 +1093,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -1110,7 +1110,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1126,7 +1126,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1143,7 +1143,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1160,7 +1160,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1787,7 +1787,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm1, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1800,7 +1800,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: pxor %xmm1, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1815,7 +1815,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1829,7 +1829,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1888,7 +1888,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm1, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1901,7 +1901,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: pxor %xmm1, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1916,7 +1916,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1930,7 +1930,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1945,7 +1945,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1984,7 +1984,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -2003,7 +2003,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -2022,7 +2022,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -2040,7 +2040,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -2060,7 +2060,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -2079,7 +2079,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pmaxub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -2098,7 +2098,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpmaxub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -2116,7 +2116,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpmaxub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -2135,7 +2135,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpmaxub %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/horizontal-reduce-umin.ll b/test/CodeGen/X86/horizontal-reduce-umin.ll
index 749fe7ee4a4..505663656a3 100644
--- a/test/CodeGen/X86/horizontal-reduce-umin.ll
+++ b/test/CodeGen/X86/horizontal-reduce-umin.ll
@@ -256,21 +256,21 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm1
; X86-SSE2-NEXT: por %xmm3, %xmm1
; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v8i16:
; X86-SSE42: ## %bb.0:
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v8i16:
; X86-AVX: ## %bb.0:
; X86-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v8i16:
@@ -304,21 +304,21 @@ define i16 @test_reduce_v8i16(<8 x i16> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm1
; X64-SSE2-NEXT: por %xmm3, %xmm1
; X64-SSE2-NEXT: movd %xmm1, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v8i16:
; X64-SSE42: ## %bb.0:
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v8i16:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX-NEXT: vmovd %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ult <8 x i16> %a0, %1
@@ -347,7 +347,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i8:
@@ -363,7 +363,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX-LABEL: test_reduce_v16i8:
@@ -377,7 +377,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_reduce_v16i8:
@@ -393,7 +393,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i8:
@@ -409,7 +409,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX-LABEL: test_reduce_v16i8:
@@ -423,7 +423,7 @@ define i8 @test_reduce_v16i8(<16 x i8> %a0) {
; X64-AVX-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
%2 = icmp ult <16 x i8> %a0, %1
@@ -857,7 +857,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm2
; X86-SSE2-NEXT: por %xmm4, %xmm2
; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v16i16:
@@ -865,7 +865,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v16i16:
@@ -874,7 +874,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -884,7 +884,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -927,7 +927,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm2
; X64-SSE2-NEXT: por %xmm4, %xmm2
; X64-SSE2-NEXT: movd %xmm2, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v16i16:
@@ -935,7 +935,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v16i16:
@@ -944,7 +944,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX1-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -954,7 +954,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -964,7 +964,7 @@ define i16 @test_reduce_v16i16(<16 x i16> %a0) {
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <16 x i16> %a0, <16 x i16> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -998,7 +998,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i8:
@@ -1015,7 +1015,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i8:
@@ -1031,7 +1031,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1048,7 +1048,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1066,7 +1066,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i8:
@@ -1083,7 +1083,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i8:
@@ -1099,7 +1099,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1116,7 +1116,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1133,7 +1133,7 @@ define i8 @test_reduce_v32i8(<32 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i8> %a0, <32 x i8> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1758,7 +1758,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE2-NEXT: pandn %xmm0, %xmm4
; X86-SSE2-NEXT: por %xmm2, %xmm4
; X86-SSE2-NEXT: movd %xmm4, %eax
-; X86-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v32i16:
@@ -1768,7 +1768,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-SSE42-NEXT: pminuw %xmm1, %xmm0
; X86-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X86-SSE42-NEXT: movd %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v32i16:
@@ -1780,7 +1780,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; X86-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1791,7 +1791,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X86-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -1850,7 +1850,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE2-NEXT: pandn %xmm0, %xmm4
; X64-SSE2-NEXT: por %xmm2, %xmm4
; X64-SSE2-NEXT: movd %xmm4, %eax
-; X64-SSE2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v32i16:
@@ -1860,7 +1860,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-SSE42-NEXT: pminuw %xmm1, %xmm0
; X64-SSE42-NEXT: phminposuw %xmm0, %xmm0
; X64-SSE42-NEXT: movd %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v32i16:
@@ -1872,7 +1872,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX1-NEXT: vpminuw %xmm2, %xmm0, %xmm0
; X64-AVX1-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -1883,7 +1883,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX2-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX2-NEXT: vmovd %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -1895,7 +1895,7 @@ define i16 @test_reduce_v32i16(<32 x i16> %a0) {
; X64-AVX512-NEXT: vpminuw %xmm1, %xmm0, %xmm0
; X64-AVX512-NEXT: vphminposuw %xmm0, %xmm0
; X64-AVX512-NEXT: vmovd %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <32 x i16> %a0, <32 x i16> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -1934,7 +1934,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: pminub %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
-; X86-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE2-NEXT: retl
;
; X86-SSE42-LABEL: test_reduce_v64i8:
@@ -1953,7 +1953,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-SSE42-NEXT: psrlw $8, %xmm0
; X86-SSE42-NEXT: pminub %xmm1, %xmm0
; X86-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X86-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X86-SSE42-NEXT: retl
;
; X86-AVX1-LABEL: test_reduce_v64i8:
@@ -1972,7 +1972,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
@@ -1990,7 +1990,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X86-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X86-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X86-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X86-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X86-AVX2-NEXT: vzeroupper
; X86-AVX2-NEXT: retl
;
@@ -2010,7 +2010,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE2-NEXT: psrlw $8, %xmm0
; X64-SSE2-NEXT: pminub %xmm1, %xmm0
; X64-SSE2-NEXT: movd %xmm0, %eax
-; X64-SSE2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE2-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE2-NEXT: retq
;
; X64-SSE42-LABEL: test_reduce_v64i8:
@@ -2029,7 +2029,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-SSE42-NEXT: psrlw $8, %xmm0
; X64-SSE42-NEXT: pminub %xmm1, %xmm0
; X64-SSE42-NEXT: pextrb $0, %xmm0, %eax
-; X64-SSE42-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-SSE42-NEXT: ## kill: def %al killed %al killed %eax
; X64-SSE42-NEXT: retq
;
; X64-AVX1-LABEL: test_reduce_v64i8:
@@ -2048,7 +2048,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX1-NEXT: vpminub %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX1-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX1-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX1-NEXT: vzeroupper
; X64-AVX1-NEXT: retq
;
@@ -2066,7 +2066,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX2-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX2-NEXT: vpminub %ymm1, %ymm0, %ymm0
; X64-AVX2-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX2-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX2-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX2-NEXT: vzeroupper
; X64-AVX2-NEXT: retq
;
@@ -2085,7 +2085,7 @@ define i8 @test_reduce_v64i8(<64 x i8> %a0) {
; X64-AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; X64-AVX512-NEXT: vpminub %zmm1, %zmm0, %zmm0
; X64-AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; X64-AVX512-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; X64-AVX512-NEXT: ## kill: def %al killed %al killed %eax
; X64-AVX512-NEXT: vzeroupper
; X64-AVX512-NEXT: retq
%1 = shufflevector <64 x i8> %a0, <64 x i8> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/iabs.ll b/test/CodeGen/X86/iabs.ll
index 32c3d8149ea..95b0328ee73 100644
--- a/test/CodeGen/X86/iabs.ll
+++ b/test/CodeGen/X86/iabs.ll
@@ -41,7 +41,7 @@ define i16 @test_i16(i16 %a) nounwind {
; X86-NO-CMOV-NEXT: sarw $15, %cx
; X86-NO-CMOV-NEXT: addl %ecx, %eax
; X86-NO-CMOV-NEXT: xorl %ecx, %eax
-; X86-NO-CMOV-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NO-CMOV-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NO-CMOV-NEXT: retl
;
; X86-CMOV-LABEL: test_i16:
diff --git a/test/CodeGen/X86/illegal-bitfield-loadstore.ll b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
index 84a59a65052..0bd84bbcad1 100644
--- a/test/CodeGen/X86/illegal-bitfield-loadstore.ll
+++ b/test/CodeGen/X86/illegal-bitfield-loadstore.ll
@@ -116,7 +116,7 @@ define void @i56_or(i56* %a) {
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
-; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<kill> %rcx<def>
+; X64-NEXT: # kill: def %ecx killed %ecx killed %rcx def %rcx
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
@@ -148,7 +148,7 @@ define void @i56_and_or(i56* %a) {
; X64-NEXT: movzwl 4(%rdi), %eax
; X64-NEXT: movzbl 6(%rdi), %ecx
; X64-NEXT: movb %cl, 6(%rdi)
-; X64-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<kill> %rcx<def>
+; X64-NEXT: # kill: def %ecx killed %ecx killed %rcx def %rcx
; X64-NEXT: shll $16, %ecx
; X64-NEXT: orl %eax, %ecx
; X64-NEXT: shlq $32, %rcx
@@ -186,7 +186,7 @@ define void @i56_insert_bit(i56* %a, i1 zeroext %bit) {
; X64-NEXT: movzwl 4(%rdi), %ecx
; X64-NEXT: movzbl 6(%rdi), %edx
; X64-NEXT: movb %dl, 6(%rdi)
-; X64-NEXT: # kill: %edx<def> %edx<kill> %rdx<kill> %rdx<def>
+; X64-NEXT: # kill: def %edx killed %edx killed %rdx def %rdx
; X64-NEXT: shll $16, %edx
; X64-NEXT: orl %ecx, %edx
; X64-NEXT: shlq $32, %rdx
diff --git a/test/CodeGen/X86/imul.ll b/test/CodeGen/X86/imul.ll
index a3322aff205..ff7df4f6b13 100644
--- a/test/CodeGen/X86/imul.ll
+++ b/test/CodeGen/X86/imul.ll
@@ -218,7 +218,7 @@ entry:
define i32 @test2(i32 %a) {
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
@@ -239,7 +239,7 @@ entry:
define i32 @test3(i32 %a) {
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
diff --git a/test/CodeGen/X86/inline-asm-fpstack.ll b/test/CodeGen/X86/inline-asm-fpstack.ll
index b07f830f9b1..c66121e10c7 100644
--- a/test/CodeGen/X86/inline-asm-fpstack.ll
+++ b/test/CodeGen/X86/inline-asm-fpstack.ll
@@ -437,9 +437,9 @@ entry:
; inline-asm instruction and the ST register was live across another
; inline-asm instruction.
;
-; INLINEASM <es:frndint> [sideeffect] [attdialect], $0:[regdef], %st0<imp-def,tied5>, $1:[reguse tiedto:$0], %st0<tied3>, $2:[clobber], %eflags<earlyclobber,imp-def,dead>
-; INLINEASM <es:fldcw $0> [sideeffect] [mayload] [attdialect], $0:[mem], %eax<undef>, 1, %noreg, 0, %noreg, $1:[clobber], %eflags<earlyclobber,imp-def,dead>
-; %fp0<def> = COPY %st0
+; INLINEASM <es:frndint> [sideeffect] [attdialect], $0:[regdef], %st0<imp-def,tied5>, $1:[reguse tiedto:$0], %st0<tied3>, $2:[clobber], early-clobber implicit dead %eflags
+; INLINEASM <es:fldcw $0> [sideeffect] [mayload] [attdialect], $0:[mem], undef %eax, 1, %noreg, 0, %noreg, $1:[clobber], early-clobber implicit dead %eflags
+; %fp0 = COPY %st0
%struct.fpu_t = type { [8 x x86_fp80], x86_fp80, %struct.anon1, %struct.anon2, i32, i8, [15 x i8] }
%struct.anon1 = type { i32, i32, i32 }
diff --git a/test/CodeGen/X86/lea-3.ll b/test/CodeGen/X86/lea-3.ll
index 94d11adc3ce..f32c782c8d7 100644
--- a/test/CodeGen/X86/lea-3.ll
+++ b/test/CodeGen/X86/lea-3.ll
@@ -36,25 +36,25 @@ define i64 @test2(i64 %a) {
define i32 @test(i32 %a) {
; LNX1-LABEL: test:
; LNX1: # %bb.0:
-; LNX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; LNX1-NEXT: # kill: def %edi killed %edi def %rdi
; LNX1-NEXT: leal (%rdi,%rdi,2), %eax
; LNX1-NEXT: retq
;
; LNX2-LABEL: test:
; LNX2: # %bb.0:
-; LNX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; LNX2-NEXT: # kill: def %edi killed %edi def %rdi
; LNX2-NEXT: leal (%rdi,%rdi,2), %eax
; LNX2-NEXT: retq
;
; NACL-LABEL: test:
; NACL: # %bb.0:
-; NACL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; NACL-NEXT: # kill: def %edi killed %edi def %rdi
; NACL-NEXT: leal (%rdi,%rdi,2), %eax
; NACL-NEXT: retq
;
; WIN-LABEL: test:
; WIN: # %bb.0:
-; WIN-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; WIN-NEXT: # kill: def %ecx killed %ecx def %rcx
; WIN-NEXT: leal (%rcx,%rcx,2), %eax
; WIN-NEXT: retq
%tmp2 = mul i32 %a, 3
diff --git a/test/CodeGen/X86/lea-opt-cse3.ll b/test/CodeGen/X86/lea-opt-cse3.ll
index 96e24a362ee..d0b5a281186 100644
--- a/test/CodeGen/X86/lea-opt-cse3.ll
+++ b/test/CodeGen/X86/lea-opt-cse3.ll
@@ -5,8 +5,8 @@
define i32 @foo(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: leal 4(%rdi,%rsi,4), %eax
; X64-NEXT: imull %ecx, %eax
@@ -33,8 +33,8 @@ entry:
define i32 @foo1(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: leal 4(%rdi,%rsi,8), %eax
; X64-NEXT: imull %ecx, %eax
@@ -61,8 +61,8 @@ entry:
define i32 @foo1_mult_basic_blocks(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 4(%rdi,%rsi,4), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
@@ -113,8 +113,8 @@ exit:
define i32 @foo1_mult_basic_blocks_illegal_scale(i32 %a, i32 %b) local_unnamed_addr #0 {
; X64-LABEL: foo1_mult_basic_blocks_illegal_scale:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 4(%rdi,%rsi,2), %ecx
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: cmpl $10, %ecx
diff --git a/test/CodeGen/X86/lea32-schedule.ll b/test/CodeGen/X86/lea32-schedule.ll
index e1bc9af65ed..f5654188546 100644
--- a/test/CodeGen/X86/lea32-schedule.ll
+++ b/test/CodeGen/X86/lea32-schedule.ll
@@ -14,13 +14,13 @@
define i32 @test_lea_offset(i32) {
; GENERIC-LABEL: test_lea_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -32,43 +32,43 @@ define i32 @test_lea_offset(i32) {
;
; SLM-LABEL: test_lea_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal -24(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal -24(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal -24(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, -24
@@ -78,13 +78,13 @@ define i32 @test_lea_offset(i32) {
define i32 @test_lea_offset_big(i32) {
; GENERIC-LABEL: test_lea_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -96,43 +96,43 @@ define i32 @test_lea_offset_big(i32) {
;
; SLM-LABEL: test_lea_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal 1024(%rdi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal 1024(%rdi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal 1024(%rdi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = add nsw i32 %0, 1024
@@ -143,15 +143,15 @@ define i32 @test_lea_offset_big(i32) {
define i32 @test_lea_add(i32, i32) {
; GENERIC-LABEL: test_lea_add:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -163,50 +163,50 @@ define i32 @test_lea_add(i32, i32) {
;
; SLM-LABEL: test_lea_add:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %esi killed %esi def %rsi
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal (%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add nsw i32 %1, %0
@@ -216,16 +216,16 @@ define i32 @test_lea_add(i32, i32) {
define i32 @test_lea_add_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $16, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -237,54 +237,54 @@ define i32 @test_lea_add_offset(i32, i32) {
;
; SLM-LABEL: test_lea_add_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %esi killed %esi def %rsi
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $16, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $16, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $16, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $16, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal 16(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, 16
@@ -295,8 +295,8 @@ define i32 @test_lea_add_offset(i32, i32) {
define i32 @test_lea_add_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-4096, %eax # imm = 0xF000
; GENERIC-NEXT: # sched: [1:0.33]
@@ -304,8 +304,8 @@ define i32 @test_lea_add_offset_big(i32, i32) {
;
; ATOM-LABEL: test_lea_add_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -317,15 +317,15 @@ define i32 @test_lea_add_offset_big(i32, i32) {
;
; SLM-LABEL: test_lea_add_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %esi killed %esi def %rsi
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-4096, %eax # imm = 0xF000
; SANDY-NEXT: # sched: [1:0.33]
@@ -333,8 +333,8 @@ define i32 @test_lea_add_offset_big(i32, i32) {
;
; HASWELL-LABEL: test_lea_add_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-4096, %eax # imm = 0xF000
; HASWELL-NEXT: # sched: [1:0.25]
@@ -342,8 +342,8 @@ define i32 @test_lea_add_offset_big(i32, i32) {
;
; BROADWELL-LABEL: test_lea_add_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-4096, %eax # imm = 0xF000
; BROADWELL-NEXT: # sched: [1:0.25]
@@ -351,8 +351,8 @@ define i32 @test_lea_add_offset_big(i32, i32) {
;
; SKYLAKE-LABEL: test_lea_add_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-4096, %eax # imm = 0xF000
; SKYLAKE-NEXT: # sched: [1:0.25]
@@ -360,15 +360,15 @@ define i32 @test_lea_add_offset_big(i32, i32) {
;
; BTVER2-LABEL: test_lea_add_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal -4096(%rdi,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = add i32 %0, -4096
@@ -379,13 +379,13 @@ define i32 @test_lea_add_offset_big(i32, i32) {
define i32 @test_lea_mul(i32) {
; GENERIC-LABEL: test_lea_mul:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -397,43 +397,43 @@ define i32 @test_lea_mul(i32) {
;
; SLM-LABEL: test_lea_mul:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
@@ -443,14 +443,14 @@ define i32 @test_lea_mul(i32) {
define i32 @test_lea_mul_offset(i32) {
; GENERIC-LABEL: test_lea_mul_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-32, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_mul_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -462,47 +462,47 @@ define i32 @test_lea_mul_offset(i32) {
;
; SLM-LABEL: test_lea_mul_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-32, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_mul_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_mul_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-32, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_mul_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-32, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_mul_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal -32(%rdi,%rdi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 3
@@ -513,7 +513,7 @@ define i32 @test_lea_mul_offset(i32) {
define i32 @test_lea_mul_offset_big(i32) {
; GENERIC-LABEL: test_lea_mul_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $10000, %eax # imm = 0x2710
; GENERIC-NEXT: # sched: [1:0.33]
@@ -521,7 +521,7 @@ define i32 @test_lea_mul_offset_big(i32) {
;
; ATOM-LABEL: test_lea_mul_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -533,13 +533,13 @@ define i32 @test_lea_mul_offset_big(i32) {
;
; SLM-LABEL: test_lea_mul_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_mul_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $10000, %eax # imm = 0x2710
; SANDY-NEXT: # sched: [1:0.33]
@@ -547,7 +547,7 @@ define i32 @test_lea_mul_offset_big(i32) {
;
; HASWELL-LABEL: test_lea_mul_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $10000, %eax # imm = 0x2710
; HASWELL-NEXT: # sched: [1:0.25]
@@ -555,7 +555,7 @@ define i32 @test_lea_mul_offset_big(i32) {
;
; BROADWELL-LABEL: test_lea_mul_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $10000, %eax # imm = 0x2710
; BROADWELL-NEXT: # sched: [1:0.25]
@@ -563,7 +563,7 @@ define i32 @test_lea_mul_offset_big(i32) {
;
; SKYLAKE-LABEL: test_lea_mul_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $10000, %eax # imm = 0x2710
; SKYLAKE-NEXT: # sched: [1:0.25]
@@ -571,13 +571,13 @@ define i32 @test_lea_mul_offset_big(i32) {
;
; BTVER2-LABEL: test_lea_mul_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_mul_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal 10000(%rdi,%rdi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%2 = mul nsw i32 %0, 9
@@ -588,15 +588,15 @@ define i32 @test_lea_mul_offset_big(i32) {
define i32 @test_lea_add_scale(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -608,50 +608,50 @@ define i32 @test_lea_add_scale(i32, i32) {
;
; SLM-LABEL: test_lea_add_scale:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %esi killed %esi def %rsi
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal (%rdi,%rsi,2), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 1
@@ -662,16 +662,16 @@ define i32 @test_lea_add_scale(i32, i32) {
define i32 @test_lea_add_scale_offset(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $96, %eax # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_lea_add_scale_offset:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -683,54 +683,54 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
;
; SLM-LABEL: test_lea_add_scale_offset:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %esi killed %esi def %rsi
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $96, %eax # sched: [1:0.33]
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_lea_add_scale_offset:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $96, %eax # sched: [1:0.25]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_lea_add_scale_offset:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $96, %eax # sched: [1:0.25]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_lea_add_scale_offset:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi,4), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $96, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_lea_add_scale_offset:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal 96(%rdi,%rsi,4), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 2
@@ -742,8 +742,8 @@ define i32 @test_lea_add_scale_offset(i32, i32) {
define i32 @test_lea_add_scale_offset_big(i32, i32) {
; GENERIC-LABEL: test_lea_add_scale_offset_big:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; GENERIC-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; GENERIC-NEXT: # kill: def %esi killed %esi def %rsi
+; GENERIC-NEXT: # kill: def %edi killed %edi def %rdi
; GENERIC-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; GENERIC-NEXT: addl $-1200, %eax # imm = 0xFB50
; GENERIC-NEXT: # sched: [1:0.33]
@@ -751,8 +751,8 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
;
; ATOM-LABEL: test_lea_add_scale_offset_big:
; ATOM: # %bb.0:
-; ATOM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ATOM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ATOM-NEXT: # kill: def %esi killed %esi def %rsi
+; ATOM-NEXT: # kill: def %edi killed %edi def %rdi
; ATOM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; ATOM-NEXT: nop # sched: [1:0.50]
; ATOM-NEXT: nop # sched: [1:0.50]
@@ -764,15 +764,15 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
;
; SLM-LABEL: test_lea_add_scale_offset_big:
; SLM: # %bb.0:
-; SLM-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NEXT: # kill: def %esi killed %esi def %rsi
+; SLM-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_lea_add_scale_offset_big:
; SANDY: # %bb.0:
-; SANDY-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SANDY-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SANDY-NEXT: # kill: def %esi killed %esi def %rsi
+; SANDY-NEXT: # kill: def %edi killed %edi def %rdi
; SANDY-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; SANDY-NEXT: addl $-1200, %eax # imm = 0xFB50
; SANDY-NEXT: # sched: [1:0.33]
@@ -780,8 +780,8 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
;
; HASWELL-LABEL: test_lea_add_scale_offset_big:
; HASWELL: # %bb.0:
-; HASWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; HASWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HASWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; HASWELL-NEXT: # kill: def %edi killed %edi def %rdi
; HASWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; HASWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
; HASWELL-NEXT: # sched: [1:0.25]
@@ -789,8 +789,8 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
;
; BROADWELL-LABEL: test_lea_add_scale_offset_big:
; BROADWELL: # %bb.0:
-; BROADWELL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BROADWELL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BROADWELL-NEXT: # kill: def %esi killed %esi def %rsi
+; BROADWELL-NEXT: # kill: def %edi killed %edi def %rdi
; BROADWELL-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; BROADWELL-NEXT: addl $-1200, %eax # imm = 0xFB50
; BROADWELL-NEXT: # sched: [1:0.25]
@@ -798,8 +798,8 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
;
; SKYLAKE-LABEL: test_lea_add_scale_offset_big:
; SKYLAKE: # %bb.0:
-; SKYLAKE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SKYLAKE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SKYLAKE-NEXT: # kill: def %esi killed %esi def %rsi
+; SKYLAKE-NEXT: # kill: def %edi killed %edi def %rdi
; SKYLAKE-NEXT: leal (%rdi,%rsi,8), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: addl $-1200, %eax # imm = 0xFB50
; SKYLAKE-NEXT: # sched: [1:0.25]
@@ -807,15 +807,15 @@ define i32 @test_lea_add_scale_offset_big(i32, i32) {
;
; BTVER2-LABEL: test_lea_add_scale_offset_big:
; BTVER2: # %bb.0:
-; BTVER2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; BTVER2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; BTVER2-NEXT: # kill: def %esi killed %esi def %rsi
+; BTVER2-NEXT: # kill: def %edi killed %edi def %rdi
; BTVER2-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_lea_add_scale_offset_big:
; ZNVER1: # %bb.0:
-; ZNVER1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ZNVER1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ZNVER1-NEXT: # kill: def %esi killed %esi def %rsi
+; ZNVER1-NEXT: # kill: def %edi killed %edi def %rdi
; ZNVER1-NEXT: leal -1200(%rdi,%rsi,8), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%3 = shl i32 %1, 3
diff --git a/test/CodeGen/X86/liveness-local-regalloc.ll b/test/CodeGen/X86/liveness-local-regalloc.ll
index 5301485353d..2eb5cc580da 100644
--- a/test/CodeGen/X86/liveness-local-regalloc.ll
+++ b/test/CodeGen/X86/liveness-local-regalloc.ll
@@ -61,8 +61,8 @@ infloop1: ; preds = %infloop1, %bb5
}
-; RAFast would forget to add a super-register <imp-def> when rewriting:
-; %10:sub_32bit<def,read-undef> = COPY %R9D<kill>
+; RAFast would forget to add a super-register implicit-def when rewriting:
+; %10:sub_32bit<def,read-undef> = COPY killed %R9D
; This trips up the machine code verifier.
define void @autogen_SD24657(i8*, i32*, i64*, i32, i64, i8) {
BB:
diff --git a/test/CodeGen/X86/loop-search.ll b/test/CodeGen/X86/loop-search.ll
index e0a81d28a70..88e9963e77f 100644
--- a/test/CodeGen/X86/loop-search.ll
+++ b/test/CodeGen/X86/loop-search.ll
@@ -25,15 +25,15 @@ define zeroext i1 @search(i32 %needle, i32* nocapture readonly %haystack, i32 %c
; ### FIXME: %bb.3 and LBB0_1 should be merged
; CHECK-NEXT: ## %bb.3:
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_1:
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
; CHECK-NEXT: LBB0_6:
; CHECK-NEXT: movb $1, %al
-; CHECK-NEXT: ## kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
entry:
%cmp5 = icmp sgt i32 %count, 0
diff --git a/test/CodeGen/X86/lzcnt-schedule.ll b/test/CodeGen/X86/lzcnt-schedule.ll
index 64874bdee81..b56bd555e78 100644
--- a/test/CodeGen/X86/lzcnt-schedule.ll
+++ b/test/CodeGen/X86/lzcnt-schedule.ll
@@ -13,7 +13,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-NEXT: lzcntw (%rsi), %cx
; GENERIC-NEXT: lzcntw %di, %ax
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctlz_i16:
@@ -21,7 +21,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; HASWELL-NEXT: lzcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctlz_i16:
@@ -29,7 +29,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; BROADWELL-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctlz_i16:
@@ -37,7 +37,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; SKYLAKE-NEXT: lzcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: lzcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctlz_i16:
@@ -45,7 +45,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; BTVER2-NEXT: lzcntw (%rsi), %cx
; BTVER2-NEXT: lzcntw %di, %ax
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctlz_i16:
@@ -53,7 +53,7 @@ define i16 @test_ctlz_i16(i16 zeroext %a0, i16 *%a1) {
; ZNVER1-NEXT: lzcntw (%rsi), %cx # sched: [6:0.50]
; ZNVER1-NEXT: lzcntw %di, %ax # sched: [2:0.25]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctlz.i16( i16 %1, i1 false )
diff --git a/test/CodeGen/X86/lzcnt-zext-cmp.ll b/test/CodeGen/X86/lzcnt-zext-cmp.ll
index 6123bdfc0db..9a31a8da2dd 100644
--- a/test/CodeGen/X86/lzcnt-zext-cmp.ll
+++ b/test/CodeGen/X86/lzcnt-zext-cmp.ll
@@ -84,7 +84,7 @@ define i16 @test_zext_cmp3(i16 %a, i16 %b) {
; ALL-NEXT: sete %cl
; ALL-NEXT: orb %al, %cl
; ALL-NEXT: movzbl %cl, %eax
-; ALL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ALL-NEXT: # kill: def %ax killed %ax killed %eax
; ALL-NEXT: retq
%cmp = icmp eq i16 %a, 0
%cmp1 = icmp eq i16 %b, 0
@@ -128,7 +128,7 @@ define i32 @test_zext_cmp5(i64 %a, i64 %b) {
; FASTLZCNT-NEXT: lzcntq %rsi, %rax
; FASTLZCNT-NEXT: orl %ecx, %eax
; FASTLZCNT-NEXT: shrl $6, %eax
-; FASTLZCNT-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; FASTLZCNT-NEXT: # kill: def %eax killed %eax killed %rax
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp5:
@@ -267,7 +267,7 @@ define i32 @test_zext_cmp9(i32 %a, i64 %b) {
; FASTLZCNT-NEXT: shrl $5, %ecx
; FASTLZCNT-NEXT: shrl $6, %eax
; FASTLZCNT-NEXT: orl %ecx, %eax
-; FASTLZCNT-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; FASTLZCNT-NEXT: # kill: def %eax killed %eax killed %rax
; FASTLZCNT-NEXT: retq
;
; NOFASTLZCNT-LABEL: test_zext_cmp9:
diff --git a/test/CodeGen/X86/machine-cp.ll b/test/CodeGen/X86/machine-cp.ll
index cbac8e31d9a..b8b9b05c318 100644
--- a/test/CodeGen/X86/machine-cp.ll
+++ b/test/CodeGen/X86/machine-cp.ll
@@ -94,7 +94,7 @@ while.end: ; preds = %while.body, %entry
; Check that copy propagation does not kill thing like:
; dst = copy src <-- do not kill that.
-; ... = op1 dst<undef>
+; ... = op1 undef dst
; ... = op2 dst <-- this is used here.
define <16 x float> @foo(<16 x float> %x) {
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/X86/machine-cse.ll b/test/CodeGen/X86/machine-cse.ll
index e5e9e6c1163..0e332382c77 100644
--- a/test/CodeGen/X86/machine-cse.ll
+++ b/test/CodeGen/X86/machine-cse.ll
@@ -50,8 +50,8 @@ declare void @printf(...) nounwind
define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
; CHECK-LABEL: commute:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: cmpl $2, %eax
; CHECK-NEXT: ja .LBB1_4
@@ -64,7 +64,7 @@ define void @commute(i32 %test_case, i32 %scale) nounwind ssp {
; CHECK-NEXT: imull %edi, %esi
; CHECK-NEXT: leal (%rsi,%rsi,2), %esi
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<kill>
+; CHECK-NEXT: # kill: def %edi killed %edi killed %rdi
; CHECK-NEXT: callq printf
; CHECK-NEXT: addq $8, %rsp
; CHECK-NEXT: .p2align 4, 0x90
diff --git a/test/CodeGen/X86/masked_gather_scatter.ll b/test/CodeGen/X86/masked_gather_scatter.ll
index d7bd9318e8e..4d03a7f1c0c 100644
--- a/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/test/CodeGen/X86/masked_gather_scatter.ll
@@ -811,26 +811,26 @@ declare <2 x double> @llvm.masked.gather.v2f64.v2p0f64(<2 x double*>, i32, <2 x
define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
; KNL_64-LABEL: test15:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %ymm0, %zmm2
; KNL_64-NEXT: vpslld $31, %ymm1, %ymm0
; KNL_64-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm2,4), %ymm0 {%k1}
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test15:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpmovsxdq %ymm0, %zmm2
; KNL_32-NEXT: vpslld $31, %ymm1, %ymm0
; KNL_32-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm2,4), %ymm0 {%k1}
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
@@ -861,8 +861,8 @@ define <4 x float> @test15(float* %base, <4 x i32> %ind, <4 x i1> %mask) {
define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x double> %src0) {
; KNL_64-LABEL: test16:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_64-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -876,8 +876,8 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x
;
; KNL_32-LABEL: test16:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_32-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -916,7 +916,7 @@ define <4 x double> @test16(double* %base, <4 x i32> %ind, <4 x i1> %mask, <4 x
define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x double> %src0) {
; KNL_64-LABEL: test17:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
@@ -929,7 +929,7 @@ define <2 x double> @test17(double* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x
;
; KNL_32-LABEL: test17:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
@@ -977,8 +977,8 @@ declare void @llvm.masked.scatter.v2f32.v2p0f32(<2 x float> , <2 x float*> , i32
define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
; KNL_64-LABEL: test18:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_64-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_64-NEXT: vmovdqa %xmm2, %xmm2
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
; KNL_64-NEXT: vptestmd %zmm2, %zmm2, %k1
@@ -988,8 +988,8 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
;
; KNL_32-LABEL: test18:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_32-NEXT: vmovdqa %xmm2, %xmm2
; KNL_32-NEXT: vpmovsxdq %ymm1, %zmm1
; KNL_32-NEXT: vpslld $31, %ymm2, %ymm2
@@ -1019,8 +1019,8 @@ define void @test18(<4 x i32>%a1, <4 x i32*> %ptr, <4 x i1>%mask) {
define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind) {
; KNL_64-LABEL: test19:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; KNL_64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL_64-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -1033,8 +1033,8 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
;
; KNL_32-LABEL: test19:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; KNL_32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL_32-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; KNL_32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -1071,8 +1071,8 @@ define void @test19(<4 x double>%a1, double* %ptr, <4 x i1>%mask, <4 x i64> %ind
define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
; KNL_64-LABEL: test20:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_64-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm2, %xmm2
; KNL_64-NEXT: vpslld $31, %ymm2, %ymm2
@@ -1083,7 +1083,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
;
; KNL_32-LABEL: test20:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_32-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm2, %xmm2
@@ -1096,7 +1096,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
;
; SKX-LABEL: test20:
; SKX: # %bb.0:
-; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; SKX-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k1
; SKX-NEXT: vscatterqps %xmm0, (,%ymm1) {%k1}
@@ -1118,7 +1118,7 @@ define void @test20(<2 x float>%a1, <2 x float*> %ptr, <2 x i1> %mask) {
define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
; KNL_64-LABEL: test21:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
; KNL_64-NEXT: vmovdqa %xmm2, %xmm2
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vpsllq $63, %zmm2, %zmm2
@@ -1141,7 +1141,7 @@ define void @test21(<2 x i32>%a1, <2 x i32*> %ptr, <2 x i1>%mask) {
;
; SKX-LABEL: test21:
; SKX: # %bb.0:
-; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; SKX-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; SKX-NEXT: vpsllq $63, %xmm2, %xmm2
; SKX-NEXT: vptestmq %xmm2, %xmm2, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -1169,7 +1169,7 @@ declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1
define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
+; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm1, %xmm1
@@ -1183,7 +1183,7 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
;
; KNL_32-LABEL: test22:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
+; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2
; KNL_32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm1, %xmm1
@@ -1223,8 +1223,8 @@ define <2 x float> @test22(float* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x fl
define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: test22a:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm1, %xmm1
; KNL_64-NEXT: vpslld $31, %ymm1, %ymm1
@@ -1236,8 +1236,8 @@ define <2 x float> @test22a(float* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x f
;
; KNL_32-LABEL: test22a:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<def>
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %ymm2
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm1, %xmm1
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1331,7 +1331,7 @@ define <2 x i32> @test23(i32* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i32> %
define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32> %src0) {
; KNL_64-LABEL: test23b:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL_64-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; KNL_64-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm1, %xmm1
@@ -1344,7 +1344,7 @@ define <2 x i32> @test23b(i32* %base, <2 x i64> %ind, <2 x i1> %mask, <2 x i32>
;
; KNL_32-LABEL: test23b:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_32-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; KNL_32-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,2],zero,zero
@@ -1430,7 +1430,7 @@ define <2 x i32> @test24(i32* %base, <2 x i32> %ind) {
define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %src0) {
; KNL_64-LABEL: test25:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_64-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_64-NEXT: vmovdqa %xmm1, %xmm1
@@ -1443,7 +1443,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
;
; KNL_32-LABEL: test25:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm2<def> %xmm2<kill> %zmm2<def>
+; KNL_32-NEXT: # kill: def %xmm2 killed %xmm2 def %zmm2
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_32-NEXT: vmovdqa %xmm1, %xmm1
@@ -1484,7 +1484,7 @@ define <2 x i64> @test25(i64* %base, <2 x i32> %ind, <2 x i1> %mask, <2 x i64> %
define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
; KNL_64-LABEL: test26:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
; KNL_64-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_64-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_64-NEXT: movb $3, %al
@@ -1496,7 +1496,7 @@ define <2 x i64> @test26(i64* %base, <2 x i32> %ind, <2 x i64> %src0) {
;
; KNL_32-LABEL: test26:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
; KNL_32-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL_32-NEXT: vpsraq $32, %zmm0, %zmm0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -1541,7 +1541,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
; KNL_64-NEXT: vgatherqps (%rdi,%zmm1,4), %ymm0 {%k1}
-; KNL_64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_64-NEXT: vzeroupper
; KNL_64-NEXT: retq
;
@@ -1553,7 +1553,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
; KNL_32-NEXT: movb $3, %cl
; KNL_32-NEXT: kmovw %ecx, %k1
; KNL_32-NEXT: vgatherqps (%eax,%zmm1,4), %ymm0 {%k1}
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL_32-NEXT: vzeroupper
; KNL_32-NEXT: retl
;
@@ -1583,7 +1583,7 @@ define <2 x float> @test27(float* %base, <2 x i32> %ind) {
define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
; KNL_64-LABEL: test28:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
+; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
; KNL_64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; KNL_64-NEXT: movb $3, %al
; KNL_64-NEXT: kmovw %eax, %k1
@@ -1605,7 +1605,7 @@ define void @test28(<2 x i32>%a1, <2 x i32*> %ptr) {
;
; SKX-LABEL: test28:
; SKX: # %bb.0:
-; SKX-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; SKX-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; SKX-NEXT: movb $3, %al
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -2373,7 +2373,7 @@ declare void @llvm.masked.scatter.v16f64.v16p0f64(<16 x double> %src0, <16 x dou
define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i64> %d) {
; KNL_64-LABEL: test_pr28312:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL_64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL_64-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_64-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_64-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -2394,7 +2394,7 @@ define <4 x i64> @test_pr28312(<4 x i64*> %p1, <4 x i1> %k, <4 x i1> %k2,<4 x i6
; KNL_32-NEXT: .cfi_def_cfa_register %ebp
; KNL_32-NEXT: andl $-32, %esp
; KNL_32-NEXT: subl $32, %esp
-; KNL_32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL_32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL_32-NEXT: vpslld $31, %xmm1, %xmm1
; KNL_32-NEXT: vpsrad $31, %xmm1, %xmm1
; KNL_32-NEXT: vpmovsxdq %xmm1, %ymm1
@@ -2566,7 +2566,7 @@ declare <1 x i32> @llvm.masked.gather.v1i32.v1p0i32(<1 x i32*>, i32, <1 x i1>, <
define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <2 x float> %src0) {
; KNL_64-LABEL: large_index:
; KNL_64: # %bb.0:
-; KNL_64-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; KNL_64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; KNL_64-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; KNL_64-NEXT: vmovaps %xmm0, %xmm0
; KNL_64-NEXT: vmovq %rcx, %xmm2
@@ -2581,7 +2581,7 @@ define <2 x float> @large_index(float* %base, <2 x i128> %ind, <2 x i1> %mask, <
;
; KNL_32-LABEL: large_index:
; KNL_32: # %bb.0:
-; KNL_32-NEXT: # kill: %xmm1<def> %xmm1<kill> %ymm1<def>
+; KNL_32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
; KNL_32-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; KNL_32-NEXT: vmovaps %xmm0, %xmm0
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 26f8255ce97..82f097e4e0f 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -316,14 +316,14 @@ define <8 x float> @test11a(<8 x i32> %trigger, <8 x float>* %addr, <8 x float>
;
; AVX512F-LABEL: test11a:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
; AVX512F-NEXT: kshiftrw $8, %k0, %k1
; AVX512F-NEXT: vblendmps (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11a:
@@ -362,12 +362,12 @@ define <8 x i32> @test11b(<8 x i1> %mask, <8 x i32>* %addr, <8 x i32> %dst) {
;
; AVX512F-LABEL: test11b:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512F-NEXT: vpmovsxwq %xmm0, %zmm0
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpblendmd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11b:
@@ -407,7 +407,7 @@ define <8 x float> @test11c(<8 x i1> %mask, <8 x float>* %addr) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11c:
@@ -447,7 +447,7 @@ define <8 x i32> @test11d(<8 x i1> %mask, <8 x i32>* %addr) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1} {z}
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: test11d:
@@ -482,8 +482,8 @@ define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
;
; AVX512F-LABEL: test12:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpcmpeqd %zmm2, %zmm0, %k0
; AVX512F-NEXT: kshiftlw $8, %k0, %k0
@@ -816,11 +816,11 @@ define <8 x float> @mload_constmask_v8f32(<8 x float>* %addr, <8 x float> %dst)
;
; AVX512F-LABEL: mload_constmask_v8f32:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: movw $7, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovups (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8f32:
@@ -868,11 +868,11 @@ define <8 x i32> @mload_constmask_v8i32(<8 x i32>* %addr, <8 x i32> %dst) {
;
; AVX512F-LABEL: mload_constmask_v8i32:
; AVX512F: ## %bb.0:
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: movw $135, %ax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vmovdqu32 (%rdi), %zmm0 {%k1}
-; AVX512F-NEXT: ## kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; SKX-LABEL: mload_constmask_v8i32:
diff --git a/test/CodeGen/X86/misched-copy.ll b/test/CodeGen/X86/misched-copy.ll
index fa1d6d8801c..d43ec941b70 100644
--- a/test/CodeGen/X86/misched-copy.ll
+++ b/test/CodeGen/X86/misched-copy.ll
@@ -9,8 +9,8 @@
; MUL_HiLo PhysReg def copies should be just below the mul.
;
; CHECK: *** Final schedule for %bb.1 ***
-; CHECK: %eax<def> = COPY
-; CHECK-NEXT: MUL32r %{{[0-9]+}}, %eax<imp-def>, %edx<imp-def>, %eflags<imp-def,dead>, %eax<imp-use>;
+; CHECK: %eax = COPY
+; CHECK-NEXT: MUL32r %{{[0-9]+}}, implicit-def %eax, implicit-def %edx, implicit-def dead %eflags, implicit %eax;
; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK-NEXT: COPY %e{{[ad]}}x
; CHECK: DIVSSrm
diff --git a/test/CodeGen/X86/movmsk.ll b/test/CodeGen/X86/movmsk.ll
index b670d33b98d..d2ee19d97ff 100644
--- a/test/CodeGen/X86/movmsk.ll
+++ b/test/CodeGen/X86/movmsk.ll
@@ -102,7 +102,7 @@ define void @float_call_signbit(double %n) {
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movq %xmm0, %rdi
; CHECK-NEXT: shrq $63, %rdi
-; CHECK-NEXT: ## kill: %edi<def> %edi<kill> %rdi<kill>
+; CHECK-NEXT: ## kill: def %edi killed %edi killed %rdi
; CHECK-NEXT: jmp _float_call_signbit_callee ## TAILCALL
entry:
%t0 = bitcast double %n to i64
diff --git a/test/CodeGen/X86/mul-constant-i16.ll b/test/CodeGen/X86/mul-constant-i16.ll
index cde94c9a095..2036eae670f 100644
--- a/test/CodeGen/X86/mul-constant-i16.ll
+++ b/test/CodeGen/X86/mul-constant-i16.ll
@@ -21,14 +21,14 @@ define i16 @test_mul_by_2(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_2:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 2
ret i16 %mul
@@ -39,14 +39,14 @@ define i16 @test_mul_by_3(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_3:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 3
ret i16 %mul
@@ -57,14 +57,14 @@ define i16 @test_mul_by_4(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_4:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (,%rdi,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 4
ret i16 %mul
@@ -75,14 +75,14 @@ define i16 @test_mul_by_5(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_5:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 5
ret i16 %mul
@@ -94,15 +94,15 @@ define i16 @test_mul_by_6(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_6:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 6
ret i16 %mul
@@ -114,15 +114,15 @@ define i16 @test_mul_by_7(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: leal (,%ecx,8), %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_7:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (,%rdi,8), %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 7
ret i16 %mul
@@ -133,14 +133,14 @@ define i16 @test_mul_by_8(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_8:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (,%rdi,8), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 8
ret i16 %mul
@@ -151,14 +151,14 @@ define i16 @test_mul_by_9(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_9:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 9
ret i16 %mul
@@ -170,15 +170,15 @@ define i16 @test_mul_by_10(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_10:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 10
ret i16 %mul
@@ -190,15 +190,15 @@ define i16 @test_mul_by_11(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_11:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 11
ret i16 %mul
@@ -210,15 +210,15 @@ define i16 @test_mul_by_12(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_12:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 12
ret i16 %mul
@@ -230,15 +230,15 @@ define i16 @test_mul_by_13(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,2), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_13:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 13
ret i16 %mul
@@ -251,16 +251,16 @@ define i16 @test_mul_by_14(i16 %x) {
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_14:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 14
ret i16 %mul
@@ -272,15 +272,15 @@ define i16 @test_mul_by_15(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_15:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 15
ret i16 %mul
@@ -291,7 +291,7 @@ define i16 @test_mul_by_16(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $4, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_16:
@@ -310,16 +310,16 @@ define i16 @test_mul_by_17(i16 %x) {
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $4, %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_17:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $4, %eax
; X64-NEXT: leal (%rax,%rdi), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 17
ret i16 %mul
@@ -331,15 +331,15 @@ define i16 @test_mul_by_18(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl %eax, %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_18:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: addl %edi, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 18
ret i16 %mul
@@ -352,16 +352,16 @@ define i16 @test_mul_by_19(i16 %x) {
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_19:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: shll $2, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 19
ret i16 %mul
@@ -373,15 +373,15 @@ define i16 @test_mul_by_20(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $2, %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_20:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: shll $2, %edi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 20
ret i16 %mul
@@ -393,15 +393,15 @@ define i16 @test_mul_by_21(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %ecx
; X86-NEXT: leal (%eax,%ecx,4), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_21:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 21
ret i16 %mul
@@ -414,16 +414,16 @@ define i16 @test_mul_by_22(i16 %x) {
; X86-NEXT: leal (%ecx,%ecx,4), %eax
; X86-NEXT: leal (%ecx,%eax,4), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_22:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rdi,%rax,4), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 22
ret i16 %mul
@@ -436,16 +436,16 @@ define i16 @test_mul_by_23(i16 %x) {
; X86-NEXT: leal (%ecx,%ecx,2), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_23:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
; X64-NEXT: shll $3, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 23
ret i16 %mul
@@ -457,15 +457,15 @@ define i16 @test_mul_by_24(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $3, %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_24:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: shll $3, %edi
; X64-NEXT: leal (%rdi,%rdi,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 24
ret i16 %mul
@@ -477,15 +477,15 @@ define i16 @test_mul_by_25(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
; X86-NEXT: leal (%eax,%eax,4), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_25:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,4), %eax
; X64-NEXT: leal (%rax,%rax,4), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 25
ret i16 %mul
@@ -498,16 +498,16 @@ define i16 @test_mul_by_26(i16 %x) {
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_26:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 26
ret i16 %mul
@@ -519,15 +519,15 @@ define i16 @test_mul_by_27(i16 %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: leal (%eax,%eax,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_27:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 27
ret i16 %mul
@@ -540,16 +540,16 @@ define i16 @test_mul_by_28(i16 %x) {
; X86-NEXT: leal (%ecx,%ecx,8), %eax
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_28:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 28
ret i16 %mul
@@ -563,17 +563,17 @@ define i16 @test_mul_by_29(i16 %x) {
; X86-NEXT: leal (%eax,%eax,2), %eax
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: addl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_29:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rdi,8), %eax
; X64-NEXT: leal (%rax,%rax,2), %eax
; X64-NEXT: addl %edi, %eax
; X64-NEXT: addl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 29
ret i16 %mul
@@ -587,7 +587,7 @@ define i16 @test_mul_by_30(i16 %x) {
; X86-NEXT: shll $5, %eax
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_30:
@@ -596,7 +596,7 @@ define i16 @test_mul_by_30(i16 %x) {
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 30
ret i16 %mul
@@ -609,7 +609,7 @@ define i16 @test_mul_by_31(i16 %x) {
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: shll $5, %eax
; X86-NEXT: subl %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_31:
@@ -617,7 +617,7 @@ define i16 @test_mul_by_31(i16 %x) {
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $5, %eax
; X64-NEXT: subl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 31
ret i16 %mul
@@ -628,7 +628,7 @@ define i16 @test_mul_by_32(i16 %x) {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll $5, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_by_32:
@@ -648,16 +648,16 @@ define i16 @test_mul_spec(i16 %x) nounwind {
; X86-NEXT: leal 42(%eax,%eax,8), %ecx
; X86-NEXT: leal 2(%eax,%eax,4), %eax
; X86-NEXT: imull %ecx, %eax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: test_mul_spec:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 42(%rdi,%rdi,8), %ecx
; X64-NEXT: leal 2(%rdi,%rdi,4), %eax
; X64-NEXT: imull %ecx, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 9
%add = add nsw i16 %mul, 42
diff --git a/test/CodeGen/X86/mul-constant-i32.ll b/test/CodeGen/X86/mul-constant-i32.ll
index f862b175351..a09167d1229 100644
--- a/test/CodeGen/X86/mul-constant-i32.ll
+++ b/test/CodeGen/X86/mul-constant-i32.ll
@@ -61,13 +61,13 @@ define i32 @test_mul_by_2(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_2:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_2:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -79,25 +79,25 @@ define i32 @test_mul_by_2(i32 %x) {
;
; HSW-NOOPT-LABEL: test_mul_by_2:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_2:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_2:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_2:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 2
@@ -112,13 +112,13 @@ define i32 @test_mul_by_3(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_3:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_3:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -129,25 +129,25 @@ define i32 @test_mul_by_3(i32 %x) {
;
; HSW-NOOPT-LABEL: test_mul_by_3:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_3:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_3:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_3:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 3
@@ -163,13 +163,13 @@ define i32 @test_mul_by_4(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_4:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_4:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -181,25 +181,25 @@ define i32 @test_mul_by_4(i32 %x) {
;
; HSW-NOOPT-LABEL: test_mul_by_4:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_4:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_4:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_4:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal (,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 4
@@ -214,13 +214,13 @@ define i32 @test_mul_by_5(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_5:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_5:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -231,25 +231,25 @@ define i32 @test_mul_by_5(i32 %x) {
;
; HSW-NOOPT-LABEL: test_mul_by_5:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_5:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_5:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_5:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 5
@@ -266,14 +266,14 @@ define i32 @test_mul_by_6(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_6:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_6:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -295,7 +295,7 @@ define i32 @test_mul_by_6(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_6:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -318,14 +318,14 @@ define i32 @test_mul_by_7(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_7:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_7:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -347,7 +347,7 @@ define i32 @test_mul_by_7(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_7:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: subl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -369,13 +369,13 @@ define i32 @test_mul_by_8(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_8:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_8:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -387,25 +387,25 @@ define i32 @test_mul_by_8(i32 %x) {
;
; HSW-NOOPT-LABEL: test_mul_by_8:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_8:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_8:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_8:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal (,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 8
@@ -420,13 +420,13 @@ define i32 @test_mul_by_9(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_9:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_9:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
;
@@ -437,25 +437,25 @@ define i32 @test_mul_by_9(i32 %x) {
;
; HSW-NOOPT-LABEL: test_mul_by_9:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; HSW-NOOPT-NEXT: retq # sched: [2:1.00]
;
; JAG-NOOPT-LABEL: test_mul_by_9:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: retq # sched: [4:1.00]
;
; X64-SLM-LABEL: test_mul_by_9:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
;
; SLM-NOOPT-LABEL: test_mul_by_9:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: retq # sched: [4:1.00]
%mul = mul nsw i32 %x, 9
@@ -472,14 +472,14 @@ define i32 @test_mul_by_10(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_10:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_10:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -501,7 +501,7 @@ define i32 @test_mul_by_10(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_10:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -524,14 +524,14 @@ define i32 @test_mul_by_11(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_11:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_11:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -574,14 +574,14 @@ define i32 @test_mul_by_12(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_12:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_12:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -603,7 +603,7 @@ define i32 @test_mul_by_12(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_12:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -626,14 +626,14 @@ define i32 @test_mul_by_13(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_13:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_13:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -677,7 +677,7 @@ define i32 @test_mul_by_14(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_14:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -685,7 +685,7 @@ define i32 @test_mul_by_14(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_14:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -729,14 +729,14 @@ define i32 @test_mul_by_15(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_15:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_15:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -758,7 +758,7 @@ define i32 @test_mul_by_15(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_15:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -834,7 +834,7 @@ define i32 @test_mul_by_17(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_17:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: movl %edi, %eax # sched: [1:0.25]
; X64-HSW-NEXT: shll $4, %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50]
@@ -842,7 +842,7 @@ define i32 @test_mul_by_17(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_17:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: movl %edi, %eax # sched: [1:0.17]
; X64-JAG-NEXT: shll $4, %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rdi), %eax # sched: [1:0.50]
@@ -865,7 +865,7 @@ define i32 @test_mul_by_17(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_17:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: movl %edi, %eax # sched: [1:0.50]
; X64-SLM-NEXT: shll $4, %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rdi), %eax # sched: [1:1.00]
@@ -889,14 +889,14 @@ define i32 @test_mul_by_18(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_18:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: addl %edi, %edi # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_18:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -918,7 +918,7 @@ define i32 @test_mul_by_18(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_18:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: addl %edi, %edi # sched: [1:0.50]
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -942,7 +942,7 @@ define i32 @test_mul_by_19(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_19:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $2, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -950,7 +950,7 @@ define i32 @test_mul_by_19(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_19:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $2, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -994,14 +994,14 @@ define i32 @test_mul_by_20(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_20:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_20:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: shll $2, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1023,7 +1023,7 @@ define i32 @test_mul_by_20(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_20:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: shll $2, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1046,14 +1046,14 @@ define i32 @test_mul_by_21(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_21:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_21:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1097,7 +1097,7 @@ define i32 @test_mul_by_22(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_22:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -1105,7 +1105,7 @@ define i32 @test_mul_by_22(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_22:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -1150,7 +1150,7 @@ define i32 @test_mul_by_23(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_23:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: shll $3, %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -1158,7 +1158,7 @@ define i32 @test_mul_by_23(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_23:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: shll $3, %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -1202,14 +1202,14 @@ define i32 @test_mul_by_24(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_24:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_24:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: shll $3, %edi # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1231,7 +1231,7 @@ define i32 @test_mul_by_24(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_24:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: shll $3, %edi # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rdi,%rdi,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1254,14 +1254,14 @@ define i32 @test_mul_by_25(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_25:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_25:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1283,7 +1283,7 @@ define i32 @test_mul_by_25(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_25:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1307,7 +1307,7 @@ define i32 @test_mul_by_26(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_26:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: subl %edi, %eax # sched: [1:0.25]
@@ -1315,7 +1315,7 @@ define i32 @test_mul_by_26(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_26:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: subl %edi, %eax # sched: [1:0.50]
@@ -1359,14 +1359,14 @@ define i32 @test_mul_by_27(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_27:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: retq # sched: [2:1.00]
;
; X64-JAG-LABEL: test_mul_by_27:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: retq # sched: [4:1.00]
@@ -1388,7 +1388,7 @@ define i32 @test_mul_by_27(i32 %x) {
;
; X64-SLM-LABEL: test_mul_by_27:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
; X64-SLM-NEXT: leal (%rax,%rax,2), %eax # sched: [1:1.00]
; X64-SLM-NEXT: retq # sched: [4:1.00]
@@ -1412,7 +1412,7 @@ define i32 @test_mul_by_28(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_28:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -1420,7 +1420,7 @@ define i32 @test_mul_by_28(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_28:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -1466,7 +1466,7 @@ define i32 @test_mul_by_29(i32 %x) {
;
; X64-HSW-LABEL: test_mul_by_29:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-HSW-NEXT: addl %edi, %eax # sched: [1:0.25]
@@ -1475,7 +1475,7 @@ define i32 @test_mul_by_29(i32 %x) {
;
; X64-JAG-LABEL: test_mul_by_29:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
; X64-JAG-NEXT: leal (%rax,%rax,2), %eax # sched: [1:0.50]
; X64-JAG-NEXT: addl %edi, %eax # sched: [1:0.50]
@@ -1681,7 +1681,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
;
; X64-HSW-LABEL: test_mul_spec:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-HSW-NEXT: addl $42, %ecx # sched: [1:0.25]
; X64-HSW-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
@@ -1691,7 +1691,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
;
; X64-JAG-LABEL: test_mul_spec:
; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-JAG-NEXT: # kill: def %edi killed %edi def %rdi
; X64-JAG-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; X64-JAG-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
; X64-JAG-NEXT: imull %ecx, %eax # sched: [3:1.00]
@@ -1707,7 +1707,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
;
; HSW-NOOPT-LABEL: test_mul_spec:
; HSW-NOOPT: # %bb.0:
-; HSW-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; HSW-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,8), %ecx # sched: [1:0.50]
; HSW-NOOPT-NEXT: addl $42, %ecx # sched: [1:0.25]
; HSW-NOOPT-NEXT: leal (%rdi,%rdi,4), %eax # sched: [1:0.50]
@@ -1717,7 +1717,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
;
; JAG-NOOPT-LABEL: test_mul_spec:
; JAG-NOOPT: # %bb.0:
-; JAG-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; JAG-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; JAG-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:0.50]
; JAG-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:0.50]
; JAG-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00]
@@ -1725,7 +1725,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
;
; X64-SLM-LABEL: test_mul_spec:
; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SLM-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SLM-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; X64-SLM-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
; X64-SLM-NEXT: imull %ecx, %eax # sched: [3:1.00]
@@ -1733,7 +1733,7 @@ define i32 @test_mul_spec(i32 %x) nounwind {
;
; SLM-NOOPT-LABEL: test_mul_spec:
; SLM-NOOPT: # %bb.0:
-; SLM-NOOPT-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SLM-NOOPT-NEXT: # kill: def %edi killed %edi def %rdi
; SLM-NOOPT-NEXT: leal 42(%rdi,%rdi,8), %ecx # sched: [1:1.00]
; SLM-NOOPT-NEXT: leal 2(%rdi,%rdi,4), %eax # sched: [1:1.00]
; SLM-NOOPT-NEXT: imull %ecx, %eax # sched: [3:1.00]
diff --git a/test/CodeGen/X86/mul-constant-result.ll b/test/CodeGen/X86/mul-constant-result.ll
index 0e7b877d431..bec0ed990dc 100644
--- a/test/CodeGen/X86/mul-constant-result.ll
+++ b/test/CodeGen/X86/mul-constant-result.ll
@@ -188,7 +188,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
;
; X64-HSW-LABEL: mult:
; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-HSW-NEXT: # kill: def %edi killed %edi def %rdi
; X64-HSW-NEXT: cmpl $1, %esi
; X64-HSW-NEXT: movl $1, %ecx
; X64-HSW-NEXT: movl %esi, %eax
@@ -202,60 +202,60 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: jmpq *.LJTI0_0(,%rdi,8)
; X64-HSW-NEXT: .LBB0_2:
; X64-HSW-NEXT: addl %eax, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_36:
; X64-HSW-NEXT: xorl %eax, %eax
; X64-HSW-NEXT: .LBB0_37:
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_3:
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_4:
; X64-HSW-NEXT: shll $2, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_5:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_6:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_7:
; X64-HSW-NEXT: leal (,%rax,8), %ecx
; X64-HSW-NEXT: jmp .LBB0_8
; X64-HSW-NEXT: .LBB0_9:
; X64-HSW-NEXT: shll $3, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_10:
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_11:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_12:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_13:
; X64-HSW-NEXT: shll $2, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_14:
; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_15:
; X64-HSW-NEXT: leal (%rax,%rax,2), %ecx
@@ -263,11 +263,11 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_18:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_19:
; X64-HSW-NEXT: shll $4, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_20:
; X64-HSW-NEXT: movl %eax, %ecx
@@ -276,7 +276,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_21:
; X64-HSW-NEXT: addl %eax, %eax
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_22:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
@@ -285,12 +285,12 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_23:
; X64-HSW-NEXT: shll $2, %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_24:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
; X64-HSW-NEXT: leal (%rax,%rcx,4), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_25:
; X64-HSW-NEXT: leal (%rax,%rax,4), %ecx
@@ -304,12 +304,12 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_27:
; X64-HSW-NEXT: shll $3, %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_28:
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
; X64-HSW-NEXT: leal (%rax,%rax,4), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_29:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
@@ -318,7 +318,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_30:
; X64-HSW-NEXT: leal (%rax,%rax,8), %eax
; X64-HSW-NEXT: leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_31:
; X64-HSW-NEXT: leal (%rax,%rax,8), %ecx
@@ -331,7 +331,7 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_17:
; X64-HSW-NEXT: addl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_33:
; X64-HSW-NEXT: movl %eax, %ecx
@@ -344,11 +344,11 @@ define i32 @mult(i32, i32) local_unnamed_addr #0 {
; X64-HSW-NEXT: .LBB0_8:
; X64-HSW-NEXT: subl %eax, %ecx
; X64-HSW-NEXT: movl %ecx, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
; X64-HSW-NEXT: .LBB0_35:
; X64-HSW-NEXT: shll $5, %eax
-; X64-HSW-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; X64-HSW-NEXT: # kill: def %eax killed %eax killed %rax
; X64-HSW-NEXT: retq
%3 = icmp eq i32 %1, 0
%4 = icmp sgt i32 %1, 1
diff --git a/test/CodeGen/X86/negate-i1.ll b/test/CodeGen/X86/negate-i1.ll
index 2c1b0bba9dc..c9ca52b9275 100644
--- a/test/CodeGen/X86/negate-i1.ll
+++ b/test/CodeGen/X86/negate-i1.ll
@@ -49,7 +49,7 @@ define i16 @select_i16_neg1_or_0(i1 %a) {
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: negl %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
@@ -66,7 +66,7 @@ define i16 @select_i16_neg1_or_0_zeroext(i1 zeroext %a) {
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-NEXT: negl %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
%b = sext i1 %a to i16
ret i16 %b
@@ -109,7 +109,7 @@ define i32 @select_i32_neg1_or_0_zeroext(i1 zeroext %a) {
define i64 @select_i64_neg1_or_0(i1 %a) {
; X64-LABEL: select_i64_neg1_or_0:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: andl $1, %edi
; X64-NEXT: negq %rdi
; X64-NEXT: movq %rdi, %rax
diff --git a/test/CodeGen/X86/norex-subreg.ll b/test/CodeGen/X86/norex-subreg.ll
index 66e5ca1e30c..205fb4e0011 100644
--- a/test/CodeGen/X86/norex-subreg.ll
+++ b/test/CodeGen/X86/norex-subreg.ll
@@ -4,8 +4,8 @@ target triple = "x86_64-apple-macosx10.7"
; This test case extracts a sub_8bit_hi sub-register:
;
-; %r8b<def> = COPY %bh, %ebx<imp-use,kill>
-; %esi<def> = MOVZX32_NOREXrr8 %r8b<kill>
+; %r8b = COPY %bh, implicit killed %ebx
+; %esi = MOVZX32_NOREXrr8 killed %r8b
;
; The register allocation above is invalid, %bh can only be encoded without an
; REX prefix, so the destination register must be GR8_NOREX. The code above
@@ -41,8 +41,8 @@ entry:
; This test case extracts a sub_8bit_hi sub-register:
;
-; %2<def> = COPY %1:sub_8bit_hi; GR8:%2 GR64_ABCD:%1
-; TEST8ri %2, 1, %eflags<imp-def>; GR8:%2
+; %2 = COPY %1:sub_8bit_hi; GR8:%2 GR64_ABCD:%1
+; TEST8ri %2, 1, implicit-def %eflags; GR8:%2
;
; %2 must be constrained to GR8_NOREX, or the COPY could become impossible.
;
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index d06d824f3d4..df97973aecb 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -30,7 +30,7 @@ define void @v3i64(<2 x i64> %a, <2 x i64> %b, <3 x i64>* %p) nounwind {
;
; AVX2-LABEL: v3i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-NEXT: vpextrq $1, %xmm0, 16(%rdi)
@@ -65,7 +65,7 @@ define void @v3f64(<2 x double> %a, <2 x double> %b, <3 x double>* %p) nounwind
;
; AVX2-LABEL: v3f64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,3]
; AVX2-NEXT: vmovhpd %xmm0, 16(%rdi)
@@ -205,7 +205,7 @@ define void @v5i32(<4 x i32> %a, <4 x i32> %b, <5 x i32>* %p) nounwind {
;
; AVX2-LABEL: v5i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
@@ -255,7 +255,7 @@ define void @v5f32(<4 x float> %a, <4 x float> %b, <5 x float>* %p) nounwind {
;
; AVX2-LABEL: v5f32:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,5,1,6,3,u,u,u>
; AVX2-NEXT: vpermps %ymm1, %ymm2, %ymm1
@@ -421,7 +421,7 @@ define void @v7i32(<4 x i32> %a, <4 x i32> %b, <7 x i32>* %p) nounwind {
;
; AVX2-LABEL: v7i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vmovaps {{.*#+}} ymm2 = <0,6,3,6,1,7,4,u>
; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm0
@@ -1697,7 +1697,7 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX1-NEXT: vmovaps %ymm1, 32(%rdi)
; AVX1-NEXT: vmovaps %ymm1, (%rdi)
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1716,7 +1716,7 @@ define <2 x double> @wrongorder(<4 x double> %A, <8 x double>* %P) #0 {
; XOP-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; XOP-NEXT: vmovaps %ymm1, 32(%rdi)
; XOP-NEXT: vmovaps %ymm1, (%rdi)
-; XOP-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; XOP-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%shuffle = shufflevector <4 x double> %A, <4 x double> %A, <8 x i32> zeroinitializer
diff --git a/test/CodeGen/X86/or-lea.ll b/test/CodeGen/X86/or-lea.ll
index ee1f6585eb7..9447ceb3c4f 100644
--- a/test/CodeGen/X86/or-lea.ll
+++ b/test/CodeGen/X86/or-lea.ll
@@ -9,8 +9,8 @@
define i32 @or_shift1_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
@@ -24,8 +24,8 @@ define i32 @or_shift1_and1(i32 %x, i32 %y) {
define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift1_and1_swapped:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,2), %eax
; CHECK-NEXT: retq
@@ -39,8 +39,8 @@ define i32 @or_shift1_and1_swapped(i32 %x, i32 %y) {
define i32 @or_shift2_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift2_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,4), %eax
; CHECK-NEXT: retq
@@ -54,8 +54,8 @@ define i32 @or_shift2_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
@@ -69,8 +69,8 @@ define i32 @or_shift3_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and7(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and7:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $7, %esi
; CHECK-NEXT: leal (%rsi,%rdi,8), %eax
; CHECK-NEXT: retq
@@ -86,8 +86,8 @@ define i32 @or_shift3_and7(i32 %x, i32 %y) {
define i32 @or_shift4_and1(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift4_and1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: andl $1, %esi
; CHECK-NEXT: leal (%rsi,%rdi), %eax
@@ -104,7 +104,7 @@ define i32 @or_shift4_and1(i32 %x, i32 %y) {
define i32 @or_shift3_and8(i32 %x, i32 %y) {
; CHECK-LABEL: or_shift3_and8:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal (,%rdi,8), %eax
; CHECK-NEXT: andl $8, %esi
; CHECK-NEXT: orl %esi, %eax
diff --git a/test/CodeGen/X86/phys_subreg_coalesce-3.ll b/test/CodeGen/X86/phys_subreg_coalesce-3.ll
index 7839936c40a..50700968321 100644
--- a/test/CodeGen/X86/phys_subreg_coalesce-3.ll
+++ b/test/CodeGen/X86/phys_subreg_coalesce-3.ll
@@ -2,9 +2,9 @@
; rdar://5571034
; This requires physreg joining, %13 is live everywhere:
-; 304L %cl<def> = COPY %13:sub_8bit; GR32_ABCD:%13
-; 320L %15<def> = COPY %19; GR32:%15 GR32_NOSP:%19
-; 336L %15<def> = SAR32rCL %15, %eflags<imp-def,dead>, %cl<imp-use,kill>; GR32:%15
+; 304L %cl = COPY %13:sub_8bit; GR32_ABCD:%13
+; 320L %15 = COPY %19; GR32:%15 GR32_NOSP:%19
+; 336L %15 = SAR32rCL %15, implicit dead %eflags, implicit killed %cl; GR32:%15
define void @foo(i32* nocapture %quadrant, i32* nocapture %ptr, i32 %bbSize, i32 %bbStart, i32 %shifts) nounwind ssp {
; CHECK-LABEL: foo:
diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 76b5b508711..f7d236ef805 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -63,7 +63,7 @@ define <16 x i8> @mul_v16i8c(<16 x i8> %i) nounwind {
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
@@ -206,7 +206,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind {
; AVX512BW-NEXT: vpmovsxbw %xmm0, %ymm0
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
entry:
diff --git a/test/CodeGen/X86/popcnt-schedule.ll b/test/CodeGen/X86/popcnt-schedule.ll
index b8a75dc6b2b..704d4ac5ccb 100644
--- a/test/CodeGen/X86/popcnt-schedule.ll
+++ b/test/CodeGen/X86/popcnt-schedule.ll
@@ -17,7 +17,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; GENERIC-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; GENERIC-NEXT: popcntw %di, %ax # sched: [3:1.00]
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SLM-LABEL: test_ctpop_i16:
@@ -25,7 +25,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; SLM-NEXT: popcntw (%rsi), %cx # sched: [6:1.00]
; SLM-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SLM-NEXT: # kill: def %ax killed %ax killed %eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_ctpop_i16:
@@ -33,7 +33,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; SANDY-NEXT: popcntw (%rsi), %cx # sched: [9:1.00]
; SANDY-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_ctpop_i16:
@@ -41,7 +41,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; HASWELL-NEXT: popcntw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_ctpop_i16:
@@ -49,7 +49,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; BROADWELL-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_ctpop_i16:
@@ -57,7 +57,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; SKYLAKE-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: popcntw %di, %ax # sched: [3:1.00]
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_ctpop_i16:
@@ -65,7 +65,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; BTVER2-NEXT: popcntw (%rsi), %cx # sched: [8:1.00]
; BTVER2-NEXT: popcntw %di, %ax # sched: [3:1.00]
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_ctpop_i16:
@@ -73,7 +73,7 @@ define i16 @test_ctpop_i16(i16 zeroext %a0, i16 *%a1) {
; ZNVER1-NEXT: popcntw (%rsi), %cx # sched: [10:1.00]
; ZNVER1-NEXT: popcntw %di, %ax # sched: [3:1.00]
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = load i16, i16 *%a1
%2 = tail call i16 @llvm.ctpop.i16( i16 %1 )
diff --git a/test/CodeGen/X86/popcnt.ll b/test/CodeGen/X86/popcnt.ll
index 8f078fdcf0d..d7622c8d0ca 100644
--- a/test/CodeGen/X86/popcnt.ll
+++ b/test/CodeGen/X86/popcnt.ll
@@ -44,14 +44,14 @@ define i8 @cnt8(i8 %x) nounwind readnone {
; X32-POPCNT: # %bb.0:
; X32-POPCNT-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X32-POPCNT-NEXT: popcntl %eax, %eax
-; X32-POPCNT-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X32-POPCNT-NEXT: # kill: def %al killed %al killed %eax
; X32-POPCNT-NEXT: retl
;
; X64-POPCNT-LABEL: cnt8:
; X64-POPCNT: # %bb.0:
; X64-POPCNT-NEXT: movzbl %dil, %eax
; X64-POPCNT-NEXT: popcntl %eax, %eax
-; X64-POPCNT-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-POPCNT-NEXT: # kill: def %al killed %al killed %eax
; X64-POPCNT-NEXT: retq
%cnt = tail call i8 @llvm.ctpop.i8(i8 %x)
ret i8 %cnt
@@ -79,7 +79,7 @@ define i16 @cnt16(i16 %x) nounwind readnone {
; X32-NEXT: shll $8, %eax
; X32-NEXT: addl %ecx, %eax
; X32-NEXT: movzbl %ah, %eax
-; X32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: # kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: cnt16:
@@ -102,7 +102,7 @@ define i16 @cnt16(i16 %x) nounwind readnone {
; X64-NEXT: shll $8, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: movzbl %ch, %eax # NOREX
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
;
; X32-POPCNT-LABEL: cnt16:
diff --git a/test/CodeGen/X86/pr22970.ll b/test/CodeGen/X86/pr22970.ll
index cd0d1f80fbb..4daa8d926ec 100644
--- a/test/CodeGen/X86/pr22970.ll
+++ b/test/CodeGen/X86/pr22970.ll
@@ -13,7 +13,7 @@ define i32 @PR22970_i32(i32* nocapture readonly, i32) {
;
; X64-LABEL: PR22970_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: andl $4095, %esi # imm = 0xFFF
; X64-NEXT: movl 32(%rdi,%rsi,4), %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/pr28173.ll b/test/CodeGen/X86/pr28173.ll
index ca455a129d3..f181217910f 100644
--- a/test/CodeGen/X86/pr28173.ll
+++ b/test/CodeGen/X86/pr28173.ll
@@ -27,7 +27,7 @@ define i16 @foo16(i1 zeroext %i) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $65534, %eax # imm = 0xFFFE
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
br label %bb
@@ -45,7 +45,7 @@ define i16 @foo16_1(i1 zeroext %i, i32 %j) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: orl $2, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
br label %bb
diff --git a/test/CodeGen/X86/pr28560.ll b/test/CodeGen/X86/pr28560.ll
index bb95a59db82..d9da9ac9e88 100644
--- a/test/CodeGen/X86/pr28560.ll
+++ b/test/CodeGen/X86/pr28560.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple=i686-pc-linux -print-after=postrapseudos < %s 2>&1 | FileCheck %s
-; CHECK: MOV8rr %{{[a-d]}}l, %e[[R:[a-d]]]x<imp-use,kill>, %e[[R]]x<imp-def>
+; CHECK: MOV8rr %{{[a-d]}}l, implicit killed %e[[R:[a-d]]]x, implicit-def %e[[R]]x
define i32 @foo(i32 %i, i32 %k, i8* %p) {
%f = icmp ne i32 %i, %k
%s = zext i1 %f to i8
diff --git a/test/CodeGen/X86/pr29061.ll b/test/CodeGen/X86/pr29061.ll
index 93da35d60bf..9c29429af7d 100644
--- a/test/CodeGen/X86/pr29061.ll
+++ b/test/CodeGen/X86/pr29061.ll
@@ -11,7 +11,7 @@ define void @t1(i8 signext %c) {
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %edi, -8
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT: # kill: %di<def> %di<kill> %edi<kill>
+; CHECK-NEXT: # kill: def %di killed %di killed %edi
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %edi
@@ -28,7 +28,7 @@ define void @t2(i8 signext %c) {
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: .cfi_offset %esi, -8
; CHECK-NEXT: movzbl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: # kill: %si<def> %si<kill> %esi<kill>
+; CHECK-NEXT: # kill: def %si killed %si killed %esi
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: popl %esi
diff --git a/test/CodeGen/X86/pr32282.ll b/test/CodeGen/X86/pr32282.ll
index 78dcb168e95..1c4d48db711 100644
--- a/test/CodeGen/X86/pr32282.ll
+++ b/test/CodeGen/X86/pr32282.ll
@@ -64,7 +64,7 @@ define void @foo() {
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %ecx
-; X64-NEXT: # kill: %eax<def> %eax<kill> %rax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %rax
; X64-NEXT: .LBB0_3:
; X64-NEXT: testq %rax, %rax
; X64-NEXT: setne -{{[0-9]+}}(%rsp)
diff --git a/test/CodeGen/X86/pr32329.ll b/test/CodeGen/X86/pr32329.ll
index bc7fe8c0047..f6c3b5cf799 100644
--- a/test/CodeGen/X86/pr32329.ll
+++ b/test/CodeGen/X86/pr32329.ll
@@ -78,7 +78,7 @@ define void @foo() local_unnamed_addr {
; X64-NEXT: imull %esi, %ecx
; X64-NEXT: addl $-1437483407, %ecx # imm = 0xAA51BE71
; X64-NEXT: movl $9, %edx
-; X64-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X64-NEXT: # kill: def %cl killed %cl killed %ecx
; X64-NEXT: shlq %cl, %rdx
; X64-NEXT: movq %rdx, {{.*}}(%rip)
; X64-NEXT: cmpl %eax, %esi
diff --git a/test/CodeGen/X86/pr32345.ll b/test/CodeGen/X86/pr32345.ll
index 78f78341839..99666c994a3 100644
--- a/test/CodeGen/X86/pr32345.ll
+++ b/test/CodeGen/X86/pr32345.ll
@@ -27,8 +27,8 @@ define void @foo() {
; X640-NEXT: movzwl var_27, %ecx
; X640-NEXT: subl $16610, %ecx # imm = 0x40E2
; X640-NEXT: movl %ecx, %ecx
-; X640-NEXT: # kill: %rcx<def> %ecx<kill>
-; X640-NEXT: # kill: %cl<def> %rcx<kill>
+; X640-NEXT: # kill: def %rcx killed %ecx
+; X640-NEXT: # kill: def %cl killed %rcx
; X640-NEXT: sarq %cl, %rsi
; X640-NEXT: movb %sil, %cl
; X640-NEXT: movb %cl, (%rax)
@@ -104,7 +104,7 @@ define void @foo() {
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-NEXT: addl $-16610, %ecx # imm = 0xBF1E
-; X64-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X64-NEXT: # kill: def %cl killed %cl killed %ecx
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: movb %al, (%rax)
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/pr34653.ll b/test/CodeGen/X86/pr34653.ll
index d888c566c7f..990cd9ac8b2 100644
--- a/test/CodeGen/X86/pr34653.ll
+++ b/test/CodeGen/X86/pr34653.ll
@@ -64,7 +64,7 @@ define void @pr34653() {
; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm11 = xmm11[1,0]
; CHECK-NEXT: vpermilpd {{.*#+}} xmm13 = xmm13[1,0]
-; CHECK-NEXT: # kill: %ymm10<def> %ymm10<kill> %zmm10<kill>
+; CHECK-NEXT: # kill: def %ymm10 killed %ymm10 killed %zmm10
; CHECK-NEXT: vextractf128 $1, %ymm10, %xmm10
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm10, %xmm0
@@ -75,7 +75,7 @@ define void @pr34653() {
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: %ymm9<def> %ymm9<kill> %zmm9<kill>
+; CHECK-NEXT: # kill: def %ymm9 killed %ymm9 killed %zmm9
; CHECK-NEXT: vextractf128 $1, %ymm9, %xmm9
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm9, %xmm0
@@ -88,7 +88,7 @@ define void @pr34653() {
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: %ymm8<def> %ymm8<kill> %zmm8<kill>
+; CHECK-NEXT: # kill: def %ymm8 killed %ymm8 killed %zmm8
; CHECK-NEXT: vextractf128 $1, %ymm8, %xmm8
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm8, %xmm0
@@ -101,7 +101,7 @@ define void @pr34653() {
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
-; CHECK-NEXT: # kill: %ymm7<def> %ymm7<kill> %zmm7<kill>
+; CHECK-NEXT: # kill: def %ymm7 killed %ymm7 killed %zmm7
; CHECK-NEXT: vextractf128 $1, %ymm7, %xmm7
; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
; CHECK-NEXT: vmovaps %xmm7, %xmm0
diff --git a/test/CodeGen/X86/promote-vec3.ll b/test/CodeGen/X86/promote-vec3.ll
index 5c6eb70b3ef..085e14ecb3b 100644
--- a/test/CodeGen/X86/promote-vec3.ll
+++ b/test/CodeGen/X86/promote-vec3.ll
@@ -19,9 +19,9 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; SSE3-NEXT: pextrw $2, %xmm0, %ecx
; SSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE3-NEXT: movd %xmm0, %eax
-; SSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; SSE3-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; SSE3-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; SSE3-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE3-NEXT: # kill: def %dx killed %dx killed %edx
+; SSE3-NEXT: # kill: def %cx killed %cx killed %ecx
; SSE3-NEXT: retl
;
; SSE41-LABEL: zext_i8:
@@ -33,9 +33,9 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrw $2, %xmm0, %edx
; SSE41-NEXT: pextrw $4, %xmm0, %ecx
-; SSE41-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; SSE41-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; SSE41-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; SSE41-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE41-NEXT: # kill: def %dx killed %dx killed %edx
+; SSE41-NEXT: # kill: def %cx killed %cx killed %ecx
; SSE41-NEXT: retl
;
; AVX-32-LABEL: zext_i8:
@@ -47,9 +47,9 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; AVX-32-NEXT: vmovd %xmm0, %eax
; AVX-32-NEXT: vpextrw $2, %xmm0, %edx
; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; AVX-32-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; AVX-32-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; AVX-32-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-32-NEXT: # kill: def %dx killed %dx killed %edx
+; AVX-32-NEXT: # kill: def %cx killed %cx killed %ecx
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: zext_i8:
@@ -61,9 +61,9 @@ define <3 x i16> @zext_i8(<3 x i8>) {
; AVX-64-NEXT: vmovd %xmm0, %eax
; AVX-64-NEXT: vpextrw $2, %xmm0, %edx
; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; AVX-64-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; AVX-64-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; AVX-64-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-64-NEXT: # kill: def %dx killed %dx killed %edx
+; AVX-64-NEXT: # kill: def %cx killed %cx killed %ecx
; AVX-64-NEXT: retq
%2 = zext <3 x i8> %0 to <3 x i16>
ret <3 x i16> %2
@@ -85,9 +85,9 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: pextrw $2, %xmm0, %edx
; SSE3-NEXT: pextrw $4, %xmm0, %ecx
-; SSE3-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; SSE3-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; SSE3-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; SSE3-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE3-NEXT: # kill: def %dx killed %dx killed %edx
+; SSE3-NEXT: # kill: def %cx killed %cx killed %ecx
; SSE3-NEXT: retl
;
; SSE41-LABEL: sext_i8:
@@ -100,9 +100,9 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: pextrw $2, %xmm0, %edx
; SSE41-NEXT: pextrw $4, %xmm0, %ecx
-; SSE41-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; SSE41-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; SSE41-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; SSE41-NEXT: # kill: def %ax killed %ax killed %eax
+; SSE41-NEXT: # kill: def %dx killed %dx killed %edx
+; SSE41-NEXT: # kill: def %cx killed %cx killed %ecx
; SSE41-NEXT: retl
;
; AVX-32-LABEL: sext_i8:
@@ -115,9 +115,9 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; AVX-32-NEXT: vmovd %xmm0, %eax
; AVX-32-NEXT: vpextrw $2, %xmm0, %edx
; AVX-32-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-32-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; AVX-32-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; AVX-32-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; AVX-32-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-32-NEXT: # kill: def %dx killed %dx killed %edx
+; AVX-32-NEXT: # kill: def %cx killed %cx killed %ecx
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: sext_i8:
@@ -130,9 +130,9 @@ define <3 x i16> @sext_i8(<3 x i8>) {
; AVX-64-NEXT: vmovd %xmm0, %eax
; AVX-64-NEXT: vpextrw $2, %xmm0, %edx
; AVX-64-NEXT: vpextrw $4, %xmm0, %ecx
-; AVX-64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
-; AVX-64-NEXT: # kill: %dx<def> %dx<kill> %edx<kill>
-; AVX-64-NEXT: # kill: %cx<def> %cx<kill> %ecx<kill>
+; AVX-64-NEXT: # kill: def %ax killed %ax killed %eax
+; AVX-64-NEXT: # kill: def %dx killed %dx killed %edx
+; AVX-64-NEXT: # kill: def %cx killed %cx killed %ecx
; AVX-64-NEXT: retq
%2 = sext <3 x i8> %0 to <3 x i16>
ret <3 x i16> %2
diff --git a/test/CodeGen/X86/psubus.ll b/test/CodeGen/X86/psubus.ll
index 6a38e564b72..8642bc596f3 100644
--- a/test/CodeGen/X86/psubus.ll
+++ b/test/CodeGen/X86/psubus.ll
@@ -1925,7 +1925,7 @@ define <8 x i16> @psubus_8i64_max(<8 x i16> %x, <8 x i64> %y) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
diff --git a/test/CodeGen/X86/reduce-trunc-shl.ll b/test/CodeGen/X86/reduce-trunc-shl.ll
index 58835c9a495..90fc2822de5 100644
--- a/test/CodeGen/X86/reduce-trunc-shl.ll
+++ b/test/CodeGen/X86/reduce-trunc-shl.ll
@@ -43,7 +43,7 @@ define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
; AVX2-NEXT: vpslld $17, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
%shl = shl <8 x i32> %a, <i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17, i32 17>
diff --git a/test/CodeGen/X86/remat-phys-dead.ll b/test/CodeGen/X86/remat-phys-dead.ll
index e057aadb907..90bbe20a883 100644
--- a/test/CodeGen/X86/remat-phys-dead.ll
+++ b/test/CodeGen/X86/remat-phys-dead.ll
@@ -9,7 +9,7 @@
define i8 @test_remat() {
ret i8 0
; CHECK: REGISTER COALESCING
-; CHECK: Remat: %eax<def,dead> = MOV32r0 %eflags<imp-def,dead>, %al<imp-def>
+; CHECK: Remat: dead %eax = MOV32r0 implicit-def dead %eflags, implicit-def %al
}
; On the other hand, if it's already the correct width, we really shouldn't be
@@ -18,6 +18,6 @@ define i8 @test_remat() {
define i32 @test_remat32() {
ret i32 0
; CHECK: REGISTER COALESCING
-; CHECK: Remat: %eax<def> = MOV32r0 %eflags<imp-def,dead>
+; CHECK: Remat: %eax = MOV32r0 implicit-def dead %eflags
}
diff --git a/test/CodeGen/X86/sar_fold64.ll b/test/CodeGen/X86/sar_fold64.ll
index 447f021442d..2c6229a0dec 100644
--- a/test/CodeGen/X86/sar_fold64.ll
+++ b/test/CodeGen/X86/sar_fold64.ll
@@ -6,7 +6,7 @@ define i32 @shl48sar47(i64 %a) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 48
%2 = ashr exact i64 %1, 47
@@ -19,7 +19,7 @@ define i32 @shl48sar49(i64 %a) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: movswq %di, %rax
; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 48
%2 = ashr exact i64 %1, 49
@@ -32,7 +32,7 @@ define i32 @shl56sar55(i64 %a) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: addl %eax, %eax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 56
%2 = ashr exact i64 %1, 55
@@ -45,7 +45,7 @@ define i32 @shl56sar57(i64 %a) #0 {
; CHECK: # %bb.0:
; CHECK-NEXT: movsbq %dil, %rax
; CHECK-NEXT: shrq %rax
-; CHECK-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
; CHECK-NEXT: retq
%1 = shl i64 %a, 56
%2 = ashr exact i64 %1, 57
diff --git a/test/CodeGen/X86/schedule-x86_64.ll b/test/CodeGen/X86/schedule-x86_64.ll
index a157e25e251..c37c6d2389e 100644
--- a/test/CodeGen/X86/schedule-x86_64.ll
+++ b/test/CodeGen/X86/schedule-x86_64.ll
@@ -23,7 +23,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; GENERIC-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsf16:
@@ -33,7 +33,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; ATOM-NEXT: bsfw (%rsi), %cx # sched: [16:8.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsf16:
@@ -43,7 +43,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SLM-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SLM-NEXT: # kill: def %ax killed %ax killed %eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsf16:
@@ -53,7 +53,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SANDY-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsf16:
@@ -63,7 +63,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; HASWELL-NEXT: bsfw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsf16:
@@ -73,7 +73,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; BROADWELL-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsf16:
@@ -83,7 +83,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SKYLAKE-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsf16:
@@ -93,7 +93,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; SKX-NEXT: bsfw (%rsi), %cx # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsf16:
@@ -103,7 +103,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; BTVER2-NEXT: bsfw (%rsi), %cx # sched: [4:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsf16:
@@ -113,7 +113,7 @@ define i16 @test_bsf16(i16 %a0, i16* %a1) optsize {
; ZNVER1-NEXT: bsfw (%rsi), %cx # sched: [7:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call { i16, i16 } asm sideeffect "bsf $2, $0 \0A\09 bsf $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
%2 = extractvalue { i16, i16 } %1, 0
@@ -322,7 +322,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; GENERIC-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; GENERIC-NEXT: #NO_APP
; GENERIC-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_bsr16:
@@ -332,7 +332,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; ATOM-NEXT: bsrw (%rsi), %cx # sched: [16:8.00]
; ATOM-NEXT: #NO_APP
; ATOM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_bsr16:
@@ -342,7 +342,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SLM-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
; SLM-NEXT: #NO_APP
; SLM-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SLM-NEXT: # kill: def %ax killed %ax killed %eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_bsr16:
@@ -352,7 +352,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SANDY-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SANDY-NEXT: #NO_APP
; SANDY-NEXT: orl %ecx, %eax # sched: [1:0.33]
-; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_bsr16:
@@ -362,7 +362,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; HASWELL-NEXT: bsrw (%rsi), %cx # sched: [3:1.00]
; HASWELL-NEXT: #NO_APP
; HASWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_bsr16:
@@ -372,7 +372,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; BROADWELL-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; BROADWELL-NEXT: #NO_APP
; BROADWELL-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_bsr16:
@@ -382,7 +382,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SKYLAKE-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SKYLAKE-NEXT: #NO_APP
; SKYLAKE-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_bsr16:
@@ -392,7 +392,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; SKX-NEXT: bsrw (%rsi), %cx # sched: [8:1.00]
; SKX-NEXT: #NO_APP
; SKX-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_bsr16:
@@ -402,7 +402,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; BTVER2-NEXT: bsrw (%rsi), %cx # sched: [4:1.00]
; BTVER2-NEXT: #NO_APP
; BTVER2-NEXT: orl %ecx, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_bsr16:
@@ -412,7 +412,7 @@ define i16 @test_bsr16(i16 %a0, i16* %a1) optsize {
; ZNVER1-NEXT: bsrw (%rsi), %cx # sched: [7:0.50]
; ZNVER1-NEXT: #NO_APP
; ZNVER1-NEXT: orl %ecx, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call { i16, i16 } asm sideeffect "bsr $2, $0 \0A\09 bsr $3, $1", "=r,=r,r,*m,~{dirflag},~{fpsr},~{flags}"(i16 %a0, i16* %a1)
%2 = extractvalue { i16, i16 } %1, 0
diff --git a/test/CodeGen/X86/select.ll b/test/CodeGen/X86/select.ll
index 9c76975fc88..d3a8d9d2af4 100644
--- a/test/CodeGen/X86/select.ll
+++ b/test/CodeGen/X86/select.ll
@@ -145,7 +145,7 @@ define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
-; MCU-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
+; MCU-NEXT: # kill: def %ah killed %ah killed %ax
; MCU-NEXT: sahf
; MCU-NEXT: seta %dl
; MCU-NEXT: movb (%ecx,%edx,4), %al
@@ -798,14 +798,14 @@ define i16 @test17(i16 %x) nounwind {
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negw %di
; GENERIC-NEXT: sbbl %eax, %eax
-; GENERIC-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: ## kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test17:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negw %di
; ATOM-NEXT: sbbl %eax, %eax
-; ATOM-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; ATOM-NEXT: ## kill: def %ax killed %ax killed %eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
@@ -816,7 +816,7 @@ define i16 @test17(i16 %x) nounwind {
; MCU: # %bb.0: # %entry
; MCU-NEXT: negw %ax
; MCU-NEXT: sbbl %eax, %eax
-; MCU-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; MCU-NEXT: # kill: def %ax killed %ax killed %eax
; MCU-NEXT: retl
entry:
%cmp = icmp ne i16 %x, 0
@@ -1027,7 +1027,7 @@ define void @test19() {
; MCU-NEXT: cmpl %eax, %ecx
; MCU-NEXT: fucom %st(0)
; MCU-NEXT: fnstsw %ax
-; MCU-NEXT: # kill: %ah<def> %ah<kill> %ax<kill>
+; MCU-NEXT: # kill: def %ah killed %ah killed %ax
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB24_4
; MCU-NEXT: # %bb.5: # %CF244
@@ -1073,7 +1073,7 @@ define i16 @select_xor_1(i16 %A, i8 %cond) {
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
; MCU-NEXT: xorl %edx, %eax
-; MCU-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; MCU-NEXT: # kill: def %ax killed %ax killed %eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
diff --git a/test/CodeGen/X86/select_const.ll b/test/CodeGen/X86/select_const.ll
index ee74986ab5d..d78f94db71a 100644
--- a/test/CodeGen/X86/select_const.ll
+++ b/test/CodeGen/X86/select_const.ll
@@ -74,7 +74,7 @@ define i32 @select_1_or_0_signext(i1 signext %cond) {
define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
@@ -85,7 +85,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_zeroext:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 0, i32 -1
@@ -139,7 +139,7 @@ define i32 @select_neg1_or_0_signext(i1 signext %cond) {
define i32 @select_Cplus1_C(i1 %cond) {
; CHECK-LABEL: select_Cplus1_C:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
@@ -150,7 +150,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_Cplus1_C_zeroext:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal 41(%rdi), %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i32 42, i32 41
@@ -287,7 +287,7 @@ define i16 @sel_neg1_1(i32 %x) {
; CHECK-NEXT: cmpl $43, %edi
; CHECK-NEXT: setl %al
; CHECK-NEXT: leal -1(,%rax,4), %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%cmp = icmp sgt i32 %x, 42
%sel = select i1 %cmp, i16 -1, i16 3
@@ -344,7 +344,7 @@ define i16 @select_pow2_diff_invert(i1 zeroext %cond) {
; CHECK-NEXT: movzbl %dil, %eax
; CHECK-NEXT: shll $6, %eax
; CHECK-NEXT: orl $7, %eax
-; CHECK-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%sel = select i1 %cond, i16 7, i16 71
ret i16 %sel
diff --git a/test/CodeGen/X86/setcc-lowering.ll b/test/CodeGen/X86/setcc-lowering.ll
index c422862ac14..33003daabe9 100644
--- a/test/CodeGen/X86/setcc-lowering.ll
+++ b/test/CodeGen/X86/setcc-lowering.ll
@@ -23,7 +23,7 @@ define <8 x i16> @pr25080(<8 x i32> %a) {
;
; KNL-32-LABEL: pr25080:
; KNL-32: # %bb.0: # %entry
-; KNL-32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL-32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL-32-NEXT: vpbroadcastd {{.*#+}} ymm1 = [8388607,8388607,8388607,8388607,8388607,8388607,8388607,8388607]
; KNL-32-NEXT: vptestnmd %zmm1, %zmm0, %k0
; KNL-32-NEXT: movb $15, %al
@@ -31,7 +31,7 @@ define <8 x i16> @pr25080(<8 x i32> %a) {
; KNL-32-NEXT: korw %k1, %k0, %k1
; KNL-32-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; KNL-32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; KNL-32-NEXT: retl
entry:
%0 = trunc <8 x i32> %a to <8 x i23>
diff --git a/test/CodeGen/X86/sext-i1.ll b/test/CodeGen/X86/sext-i1.ll
index 5b3897df32a..bb8a4bcec8d 100644
--- a/test/CodeGen/X86/sext-i1.ll
+++ b/test/CodeGen/X86/sext-i1.ll
@@ -124,7 +124,7 @@ define i32 @select_0_or_1s(i1 %cond) {
;
; X64-LABEL: select_0_or_1s:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: andl $1, %edi
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
@@ -144,7 +144,7 @@ define i32 @select_0_or_1s_zeroext(i1 zeroext %cond) {
;
; X64-LABEL: select_0_or_1s_zeroext:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: retq
%not = xor i1 %cond, 1
diff --git a/test/CodeGen/X86/shift-combine.ll b/test/CodeGen/X86/shift-combine.ll
index 772dc583d81..0f2966f962b 100644
--- a/test/CodeGen/X86/shift-combine.ll
+++ b/test/CodeGen/X86/shift-combine.ll
@@ -14,7 +14,7 @@ define i32 @test_lshr_and(i32 %x) {
;
; X64-LABEL: test_lshr_and:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl array(,%rdi,4), %eax
@@ -102,7 +102,7 @@ define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
;
; X64-LABEL: test_exact4:
; X64: # %bb.0:
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
@@ -124,7 +124,7 @@ define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
;
; X64-LABEL: test_exact5:
; X64: # %bb.0:
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
@@ -145,7 +145,7 @@ define i32* @test_exact6(i32 %a, i32 %b, i32* %x) {
;
; X64-LABEL: test_exact6:
; X64: # %bb.0:
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: subl %edi, %esi
; X64-NEXT: leaq (%rsi,%rdx), %rax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/shift-double.ll b/test/CodeGen/X86/shift-double.ll
index 0ca04eff661..f7ea2e339c3 100644
--- a/test/CodeGen/X86/shift-double.ll
+++ b/test/CodeGen/X86/shift-double.ll
@@ -278,7 +278,7 @@ define i32 @test11(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X86-NEXT: # kill: def %cl killed %cl killed %ecx
; X86-NEXT: shldl %cl, %edx, %eax
; X86-NEXT: retl
;
@@ -304,7 +304,7 @@ define i32 @test12(i32 %hi, i32 %lo, i32 %bits) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl $31, %ecx
-; X86-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X86-NEXT: # kill: def %cl killed %cl killed %ecx
; X86-NEXT: shrdl %cl, %edx, %eax
; X86-NEXT: retl
;
diff --git a/test/CodeGen/X86/shrink-compare.ll b/test/CodeGen/X86/shrink-compare.ll
index 251d68f296b..32dcf426825 100644
--- a/test/CodeGen/X86/shrink-compare.ll
+++ b/test/CodeGen/X86/shrink-compare.ll
@@ -72,11 +72,11 @@ define i1 @test4(i64 %a, i32 %b) {
; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: je .LBB3_1
; CHECK-NEXT: # %bb.2: # %lor.end
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
; CHECK-NEXT: .LBB3_1: # %lor.rhs
; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
entry:
%tobool = icmp ne i32 %b, 0
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-256.ll b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
index f4008dcbcf1..59a8aa47246 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-256.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-256.ll
@@ -760,7 +760,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -770,7 +770,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -780,7 +780,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512VL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -790,7 +790,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -801,7 +801,7 @@ define <16 x i8> @negative(<32 x i8> %v, <32 x i8> %w) nounwind {
; AVX512BWVL-NEXT: kmovd %eax, %k1
; AVX512BWVL-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1}
; AVX512BWVL-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
-; AVX512BWVL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BWVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
%strided.vec = shufflevector <32 x i8> %v, <32 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
diff --git a/test/CodeGen/X86/sse2-schedule.ll b/test/CodeGen/X86/sse2-schedule.ll
index 4f0fcf524b5..3f32d294874 100644
--- a/test/CodeGen/X86/sse2-schedule.ll
+++ b/test/CodeGen/X86/sse2-schedule.ll
@@ -5485,61 +5485,61 @@ define i16 @test_pextrw(<8 x i16> %a0) {
; GENERIC-LABEL: test_pextrw:
; GENERIC: # %bb.0:
; GENERIC-NEXT: pextrw $6, %xmm0, %eax # sched: [3:1.00]
-; GENERIC-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; ATOM-LABEL: test_pextrw:
; ATOM: # %bb.0:
; ATOM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:2.00]
-; ATOM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ATOM-NEXT: # kill: def %ax killed %ax killed %eax
; ATOM-NEXT: retq # sched: [79:39.50]
;
; SLM-LABEL: test_pextrw:
; SLM: # %bb.0:
; SLM-NEXT: pextrw $6, %xmm0, %eax # sched: [4:1.00]
-; SLM-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SLM-NEXT: # kill: def %ax killed %ax killed %eax
; SLM-NEXT: retq # sched: [4:1.00]
;
; SANDY-LABEL: test_pextrw:
; SANDY: # %bb.0:
; SANDY-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SANDY-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SANDY-NEXT: # kill: def %ax killed %ax killed %eax
; SANDY-NEXT: retq # sched: [1:1.00]
;
; HASWELL-LABEL: test_pextrw:
; HASWELL: # %bb.0:
; HASWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; HASWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; HASWELL-NEXT: # kill: def %ax killed %ax killed %eax
; HASWELL-NEXT: retq # sched: [2:1.00]
;
; BROADWELL-LABEL: test_pextrw:
; BROADWELL: # %bb.0:
; BROADWELL-NEXT: vpextrw $6, %xmm0, %eax # sched: [2:1.00]
-; BROADWELL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BROADWELL-NEXT: # kill: def %ax killed %ax killed %eax
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
; SKYLAKE-LABEL: test_pextrw:
; SKYLAKE: # %bb.0:
; SKYLAKE-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKYLAKE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKYLAKE-NEXT: # kill: def %ax killed %ax killed %eax
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
; SKX-LABEL: test_pextrw:
; SKX: # %bb.0:
; SKX-NEXT: vpextrw $6, %xmm0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SKX-NEXT: # kill: def %ax killed %ax killed %eax
; SKX-NEXT: retq # sched: [7:1.00]
;
; BTVER2-LABEL: test_pextrw:
; BTVER2: # %bb.0:
; BTVER2-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.50]
-; BTVER2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; BTVER2-NEXT: # kill: def %ax killed %ax killed %eax
; BTVER2-NEXT: retq # sched: [4:1.00]
;
; ZNVER1-LABEL: test_pextrw:
; ZNVER1: # %bb.0:
; ZNVER1-NEXT: vpextrw $6, %xmm0, %eax # sched: [1:0.25]
-; ZNVER1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ZNVER1-NEXT: # kill: def %ax killed %ax killed %eax
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = extractelement <8 x i16> %a0, i32 6
ret i16 %1
diff --git a/test/CodeGen/X86/sse42-schedule.ll b/test/CodeGen/X86/sse42-schedule.ll
index cac7cb3f023..0461a21f5e1 100644
--- a/test/CodeGen/X86/sse42-schedule.ll
+++ b/test/CodeGen/X86/sse42-schedule.ll
@@ -370,7 +370,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-NEXT: movl $7, %eax # sched: [1:0.33]
; GENERIC-NEXT: movl $7, %edx # sched: [1:0.33]
; GENERIC-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; GENERIC-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; GENERIC-NEXT: # kill: def %ecx killed %ecx def %rcx
; GENERIC-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -383,7 +383,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SLM-NEXT: movl $7, %edx # sched: [1:0.50]
; SLM-NEXT: movl %ecx, %esi # sched: [1:0.50]
; SLM-NEXT: pcmpestri $7, (%rdi), %xmm0 # sched: [21:21.00]
-; SLM-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SLM-NEXT: # kill: def %ecx killed %ecx def %rcx
; SLM-NEXT: leal (%rcx,%rsi), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
@@ -396,7 +396,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: movl $7, %eax # sched: [1:0.33]
; SANDY-NEXT: movl $7, %edx # sched: [1:0.33]
; SANDY-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [4:2.33]
-; SANDY-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SANDY-NEXT: # kill: def %ecx killed %ecx def %rcx
; SANDY-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
@@ -409,7 +409,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; HASWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; HASWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; HASWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [18:4.00]
-; HASWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; HASWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
; HASWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
@@ -422,7 +422,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BROADWELL-NEXT: movl $7, %eax # sched: [1:0.25]
; BROADWELL-NEXT: movl $7, %edx # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [23:4.00]
-; BROADWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; BROADWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
; BROADWELL-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
@@ -435,7 +435,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKYLAKE-NEXT: movl $7, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: movl $7, %edx # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKYLAKE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SKYLAKE-NEXT: # kill: def %ecx killed %ecx def %rcx
; SKYLAKE-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
@@ -448,7 +448,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: movl $7, %eax # sched: [1:0.25]
; SKX-NEXT: movl $7, %edx # sched: [1:0.25]
; SKX-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [24:4.00]
-; SKX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SKX-NEXT: # kill: def %ecx killed %ecx def %rcx
; SKX-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
@@ -461,7 +461,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: movl $7, %edx # sched: [1:0.17]
; BTVER2-NEXT: movl %ecx, %esi # sched: [1:0.17]
; BTVER2-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [19:10.00]
-; BTVER2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; BTVER2-NEXT: # kill: def %ecx killed %ecx def %rcx
; BTVER2-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
@@ -474,7 +474,7 @@ define i32 @test_pcmpestri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1-NEXT: movl $7, %edx # sched: [1:0.25]
; ZNVER1-NEXT: movl %ecx, %esi # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpestri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; ZNVER1-NEXT: # kill: def %ecx killed %ecx def %rcx
; ZNVER1-NEXT: leal (%rcx,%rsi), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpestri128(<16 x i8> %a0, i32 7, <16 x i8> %a1, i32 7, i8 7)
@@ -588,7 +588,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; GENERIC-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; GENERIC-NEXT: movl %ecx, %eax # sched: [1:0.33]
; GENERIC-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; GENERIC-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; GENERIC-NEXT: # kill: def %ecx killed %ecx def %rcx
; GENERIC-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -597,7 +597,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SLM-NEXT: pcmpistri $7, %xmm1, %xmm0 # sched: [17:17.00]
; SLM-NEXT: movl %ecx, %eax # sched: [1:0.50]
; SLM-NEXT: pcmpistri $7, (%rdi), %xmm0 # sched: [17:17.00]
-; SLM-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SLM-NEXT: # kill: def %ecx killed %ecx def %rcx
; SLM-NEXT: leal (%rcx,%rax), %eax # sched: [1:1.00]
; SLM-NEXT: retq # sched: [4:1.00]
;
@@ -606,7 +606,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SANDY-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; SANDY-NEXT: movl %ecx, %eax # sched: [1:0.33]
; SANDY-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [17:3.00]
-; SANDY-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SANDY-NEXT: # kill: def %ecx killed %ecx def %rcx
; SANDY-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SANDY-NEXT: retq # sched: [1:1.00]
;
@@ -615,7 +615,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; HASWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; HASWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; HASWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [11:3.00]
-; HASWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; HASWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
; HASWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; HASWELL-NEXT: retq # sched: [2:1.00]
;
@@ -624,7 +624,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BROADWELL-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [11:3.00]
; BROADWELL-NEXT: movl %ecx, %eax # sched: [1:0.25]
; BROADWELL-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; BROADWELL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; BROADWELL-NEXT: # kill: def %ecx killed %ecx def %rcx
; BROADWELL-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BROADWELL-NEXT: retq # sched: [7:1.00]
;
@@ -633,7 +633,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKYLAKE-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKYLAKE-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKYLAKE-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKYLAKE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SKYLAKE-NEXT: # kill: def %ecx killed %ecx def %rcx
; SKYLAKE-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SKYLAKE-NEXT: retq # sched: [7:1.00]
;
@@ -642,7 +642,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; SKX-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [10:3.00]
; SKX-NEXT: movl %ecx, %eax # sched: [1:0.25]
; SKX-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [16:3.00]
-; SKX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; SKX-NEXT: # kill: def %ecx killed %ecx def %rcx
; SKX-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; SKX-NEXT: retq # sched: [7:1.00]
;
@@ -651,7 +651,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; BTVER2-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [7:2.00]
; BTVER2-NEXT: movl %ecx, %eax # sched: [1:0.17]
; BTVER2-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [12:2.00]
-; BTVER2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; BTVER2-NEXT: # kill: def %ecx killed %ecx def %rcx
; BTVER2-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.50]
; BTVER2-NEXT: retq # sched: [4:1.00]
;
@@ -660,7 +660,7 @@ define i32 @test_pcmpistri(<16 x i8> %a0, <16 x i8> %a1, <16 x i8> *%a2) {
; ZNVER1-NEXT: vpcmpistri $7, %xmm1, %xmm0 # sched: [100:?]
; ZNVER1-NEXT: movl %ecx, %eax # sched: [1:0.25]
; ZNVER1-NEXT: vpcmpistri $7, (%rdi), %xmm0 # sched: [100:?]
-; ZNVER1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
+; ZNVER1-NEXT: # kill: def %ecx killed %ecx def %rcx
; ZNVER1-NEXT: leal (%rcx,%rax), %eax # sched: [1:0.25]
; ZNVER1-NEXT: retq # sched: [1:0.50]
%1 = call i32 @llvm.x86.sse42.pcmpistri128(<16 x i8> %a0, <16 x i8> %a1, i8 7)
diff --git a/test/CodeGen/X86/subvector-broadcast.ll b/test/CodeGen/X86/subvector-broadcast.ll
index e3c91ffaaa0..33cf2f453ba 100644
--- a/test/CodeGen/X86/subvector-broadcast.ll
+++ b/test/CodeGen/X86/subvector-broadcast.ll
@@ -1145,13 +1145,13 @@ entry:
define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
; X32-LABEL: reg_broadcast_2f64_4f64:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2f64_4f64:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a0, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1161,28 +1161,28 @@ define <4 x double> @reg_broadcast_2f64_4f64(<2 x double> %a0) nounwind {
define <8 x double> @reg_broadcast_2f64_8f64(<2 x double> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2f64_8f64:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2f64_8f64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2f64_8f64:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2f64_8f64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1198,7 +1198,7 @@ define <8 x double> @reg_broadcast_4f64_8f64(<4 x double> %a0) nounwind {
;
; X32-AVX512-LABEL: reg_broadcast_4f64_8f64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1209,7 +1209,7 @@ define <8 x double> @reg_broadcast_4f64_8f64(<4 x double> %a0) nounwind {
;
; X64-AVX512-LABEL: reg_broadcast_4f64_8f64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <4 x double> %a0, <4 x double> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1219,13 +1219,13 @@ define <8 x double> @reg_broadcast_4f64_8f64(<4 x double> %a0) nounwind {
define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
; X32-LABEL: reg_broadcast_2i64_4i64:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_2i64_4i64:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1235,28 +1235,28 @@ define <4 x i64> @reg_broadcast_2i64_4i64(<2 x i64> %a0) nounwind {
define <8 x i64> @reg_broadcast_2i64_8i64(<2 x i64> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_2i64_8i64:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_2i64_8i64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_2i64_8i64:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_2i64_8i64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1272,7 +1272,7 @@ define <8 x i64> @reg_broadcast_4i64_8i64(<4 x i64> %a0) nounwind {
;
; X32-AVX512-LABEL: reg_broadcast_4i64_8i64:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1283,7 +1283,7 @@ define <8 x i64> @reg_broadcast_4i64_8i64(<4 x i64> %a0) nounwind {
;
; X64-AVX512-LABEL: reg_broadcast_4i64_8i64:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1293,13 +1293,13 @@ define <8 x i64> @reg_broadcast_4i64_8i64(<4 x i64> %a0) nounwind {
define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
; X32-LABEL: reg_broadcast_4f32_8f32:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4f32_8f32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1309,28 +1309,28 @@ define <8 x float> @reg_broadcast_4f32_8f32(<4 x float> %a0) nounwind {
define <16 x float> @reg_broadcast_4f32_16f32(<4 x float> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4f32_16f32:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4f32_16f32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4f32_16f32:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4f32_16f32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1346,7 +1346,7 @@ define <16 x float> @reg_broadcast_8f32_16f32(<8 x float> %a0) nounwind {
;
; X32-AVX512-LABEL: reg_broadcast_8f32_16f32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1357,7 +1357,7 @@ define <16 x float> @reg_broadcast_8f32_16f32(<8 x float> %a0) nounwind {
;
; X64-AVX512-LABEL: reg_broadcast_8f32_16f32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x float> %a0, <8 x float> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1367,13 +1367,13 @@ define <16 x float> @reg_broadcast_8f32_16f32(<8 x float> %a0) nounwind {
define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
; X32-LABEL: reg_broadcast_4i32_8i32:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_4i32_8i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
@@ -1383,28 +1383,28 @@ define <8 x i32> @reg_broadcast_4i32_8i32(<4 x i32> %a0) nounwind {
define <16 x i32> @reg_broadcast_4i32_16i32(<4 x i32> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_4i32_16i32:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512-LABEL: reg_broadcast_4i32_16i32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_4i32_16i32:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512-LABEL: reg_broadcast_4i32_16i32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
@@ -1420,7 +1420,7 @@ define <16 x i32> @reg_broadcast_8i32_16i32(<8 x i32> %a0) nounwind {
;
; X32-AVX512-LABEL: reg_broadcast_8i32_16i32:
; X32-AVX512: # %bb.0:
-; X32-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X32-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X32-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512-NEXT: retl
;
@@ -1431,7 +1431,7 @@ define <16 x i32> @reg_broadcast_8i32_16i32(<8 x i32> %a0) nounwind {
;
; X64-AVX512-LABEL: reg_broadcast_8i32_16i32:
; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X64-AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X64-AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512-NEXT: retq
%1 = shufflevector <8 x i32> %a0, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1441,13 +1441,13 @@ define <16 x i32> @reg_broadcast_8i32_16i32(<8 x i32> %a0) nounwind {
define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
; X32-LABEL: reg_broadcast_8i16_16i16:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_8i16_16i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1457,56 +1457,56 @@ define <16 x i16> @reg_broadcast_8i16_16i16(<8 x i16> %a0) nounwind {
define <32 x i16> @reg_broadcast_8i16_32i16(<8 x i16> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512F: # %bb.0:
-; X32-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
; X32-AVX512DQ: # %bb.0:
-; X32-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512F: # %bb.0:
-; X64-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_8i16_32i16:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -1527,7 +1527,7 @@ define <32 x i16> @reg_broadcast_16i16_32i16(<16 x i16> %a0) nounwind {
;
; X32-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X32-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
@@ -1548,7 +1548,7 @@ define <32 x i16> @reg_broadcast_16i16_32i16(<16 x i16> %a0) nounwind {
;
; X64-AVX512BW-LABEL: reg_broadcast_16i16_32i16:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X64-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
@@ -1563,13 +1563,13 @@ define <32 x i16> @reg_broadcast_16i16_32i16(<16 x i16> %a0) nounwind {
define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
; X32-LABEL: reg_broadcast_16i8_32i8:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: reg_broadcast_16i8_32i8:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -1579,56 +1579,56 @@ define <32 x i8> @reg_broadcast_16i8_32i8(<16 x i8> %a0) nounwind {
define <64 x i8> @reg_broadcast_16i8_64i8(<16 x i8> %a0) nounwind {
; X32-AVX-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX-NEXT: retl
;
; X32-AVX512F-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512F: # %bb.0:
-; X32-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512F-NEXT: retl
;
; X32-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
; X32-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
; X32-AVX512DQ: # %bb.0:
-; X32-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X32-AVX512DQ-NEXT: retl
;
; X64-AVX-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX-NEXT: retq
;
; X64-AVX512F-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512F: # %bb.0:
-; X64-AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512F-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512F-NEXT: retq
;
; X64-AVX512BW-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
; X64-AVX512DQ-LABEL: reg_broadcast_16i8_64i8:
; X64-AVX512DQ: # %bb.0:
-; X64-AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X64-AVX512DQ-NEXT: vmovaps %ymm0, %ymm1
; X64-AVX512DQ-NEXT: retq
@@ -1649,7 +1649,7 @@ define <64 x i8> @reg_broadcast_32i8_64i8(<32 x i8> %a0) nounwind {
;
; X32-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
; X32-AVX512BW: # %bb.0:
-; X32-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X32-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X32-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X32-AVX512BW-NEXT: retl
;
@@ -1670,7 +1670,7 @@ define <64 x i8> @reg_broadcast_32i8_64i8(<32 x i8> %a0) nounwind {
;
; X64-AVX512BW-LABEL: reg_broadcast_32i8_64i8:
; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; X64-AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; X64-AVX512BW-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; X64-AVX512BW-NEXT: retq
;
diff --git a/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll b/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
index 862c421f63f..74084df8bb7 100644
--- a/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
@@ -28,7 +28,7 @@ define i32 @test__blcfill_u32(i32 %a0) {
;
; X64-LABEL: test__blcfill_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: andl %edi, %eax
; X64-NEXT: retq
@@ -48,7 +48,7 @@ define i32 @test__blci_u32(i32 %a0) {
;
; X64-LABEL: test__blci_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl $-1, %eax
; X64-NEXT: orl %edi, %eax
@@ -93,7 +93,7 @@ define i32 @test__blcmsk_u32(i32 %a0) {
;
; X64-LABEL: test__blcmsk_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: xorl %edi, %eax
; X64-NEXT: retq
@@ -112,7 +112,7 @@ define i32 @test__blcs_u32(i32 %a0) {
;
; X64-LABEL: test__blcs_u32:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal 1(%rdi), %eax
; X64-NEXT: orl %edi, %eax
; X64-NEXT: retq
diff --git a/test/CodeGen/X86/tbm_patterns.ll b/test/CodeGen/X86/tbm_patterns.ll
index b629d2e7f4d..5cf98b9b73a 100644
--- a/test/CodeGen/X86/tbm_patterns.ll
+++ b/test/CodeGen/X86/tbm_patterns.ll
@@ -151,7 +151,7 @@ define i32 @test_x86_tbm_blcfill_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcfill_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: testl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
@@ -230,7 +230,7 @@ define i32 @test_x86_tbm_blci_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blci_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blci_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: notl %eax
; CHECK-NEXT: orl %edi, %eax
@@ -419,7 +419,7 @@ define i32 @test_x86_tbm_blcmsk_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcmsk_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcmsk_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
@@ -496,7 +496,7 @@ define i32 @test_x86_tbm_blcs_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blcs_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blcs_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal 1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
@@ -573,7 +573,7 @@ define i32 @test_x86_tbm_blsfill_u32_z(i32 %a, i32 %b) nounwind {
define i32 @test_x86_tbm_blsfill_u32_z2(i32 %a, i32 %b, i32 %c) nounwind {
; CHECK-LABEL: test_x86_tbm_blsfill_u32_z2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: cmovnel %edx, %esi
diff --git a/test/CodeGen/X86/umul-with-overflow.ll b/test/CodeGen/X86/umul-with-overflow.ll
index 22e1057b803..5a57f9f1297 100644
--- a/test/CodeGen/X86/umul-with-overflow.ll
+++ b/test/CodeGen/X86/umul-with-overflow.ll
@@ -35,7 +35,7 @@ define i32 @test2(i32 %a, i32 %b) nounwind readnone {
;
; X64-LABEL: test2:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: addl %esi, %edi
; X64-NEXT: leal (%rdi,%rdi), %eax
; X64-NEXT: retq
@@ -57,8 +57,8 @@ define i32 @test3(i32 %a, i32 %b) nounwind readnone {
;
; X64-LABEL: test3:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: leal (%rdi,%rsi), %eax
; X64-NEXT: movl $4, %ecx
; X64-NEXT: mull %ecx
diff --git a/test/CodeGen/X86/urem-i8-constant.ll b/test/CodeGen/X86/urem-i8-constant.ll
index 7405a48de78..3e0ed75fc49 100644
--- a/test/CodeGen/X86/urem-i8-constant.ll
+++ b/test/CodeGen/X86/urem-i8-constant.ll
@@ -11,7 +11,7 @@ define i8 @foo(i8 %tmp325) {
; CHECK-NEXT: shrl $12, %eax
; CHECK-NEXT: movzwl %ax, %eax
; CHECK-NEXT: movb $37, %dl
-; CHECK-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; CHECK-NEXT: # kill: def %al killed %al killed %eax
; CHECK-NEXT: mulb %dl
; CHECK-NEXT: subb %al, %cl
; CHECK-NEXT: movl %ecx, %eax
diff --git a/test/CodeGen/X86/urem-power-of-two.ll b/test/CodeGen/X86/urem-power-of-two.ll
index 8dc5e5338aa..2610beda415 100644
--- a/test/CodeGen/X86/urem-power-of-two.ll
+++ b/test/CodeGen/X86/urem-power-of-two.ll
@@ -56,7 +56,7 @@ define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: decl %eax
; X86-NEXT: andw {{[0-9]+}}(%esp), %ax
-; X86-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X86-NEXT: # kill: def %ax killed %ax killed %eax
; X86-NEXT: retl
;
; X64-LABEL: shift_right_pow_2:
@@ -66,7 +66,7 @@ define i16 @shift_right_pow_2(i16 %x, i16 %y) {
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: decl %eax
; X64-NEXT: andl %edi, %eax
-; X64-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: # kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
%shr = lshr i16 -32768, %y
%urem = urem i16 %x, %shr
@@ -81,20 +81,20 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
; X86-NEXT: movb {{[0-9]+}}(%esp), %cl
; X86-NEXT: andb $4, %cl
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X86-NEXT: # kill: def %eax killed %eax def %ax
; X86-NEXT: divb %cl
; X86-NEXT: movzbl %ah, %eax # NOREX
-; X86-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X86-NEXT: # kill: def %al killed %al killed %eax
; X86-NEXT: retl
;
; X64-LABEL: and_pow_2:
; X64: # %bb.0:
; X64-NEXT: andb $4, %sil
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: %eax<def> %eax<kill> %ax<def>
+; X64-NEXT: # kill: def %eax killed %eax def %ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %ah, %eax # NOREX
-; X64-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; X64-NEXT: # kill: def %al killed %al killed %eax
; X64-NEXT: retq
%and = and i8 %y, 4
%urem = urem i8 %x, %and
diff --git a/test/CodeGen/X86/vec_cmp_uint-128.ll b/test/CodeGen/X86/vec_cmp_uint-128.ll
index 70f6a1ff677..4dbe444e138 100644
--- a/test/CodeGen/X86/vec_cmp_uint-128.ll
+++ b/test/CodeGen/X86/vec_cmp_uint-128.ll
@@ -297,8 +297,8 @@ define <2 x i64> @ge_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512-LABEL: ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -633,8 +633,8 @@ define <2 x i64> @le_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512-LABEL: le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm1
; AVX512-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/vec_fp_to_int.ll b/test/CodeGen/X86/vec_fp_to_int.ll
index 1f1575368af..bdfc96ba97d 100644
--- a/test/CodeGen/X86/vec_fp_to_int.ll
+++ b/test/CodeGen/X86/vec_fp_to_int.ll
@@ -60,9 +60,9 @@ define <2 x i64> @fptosi_2f64_to_2i64(<2 x double> %a) {
;
; AVX512DQ-LABEL: fptosi_2f64_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -115,7 +115,7 @@ define <4 x i32> @fptosi_4f64_to_2i32(<2 x double> %a) {
;
; AVX-LABEL: fptosi_4f64_to_2i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX-NEXT: vcvttpd2dq %ymm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
@@ -217,9 +217,9 @@ define <4 x i64> @fptosi_4f64_to_4i64(<4 x double> %a) {
;
; AVX512DQ-LABEL: fptosi_4f64_to_4i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f64_to_4i64:
@@ -321,9 +321,9 @@ define <2 x i64> @fptoui_2f64_to_2i64(<2 x double> %a) {
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -388,7 +388,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
;
; AVX512F-LABEL: fptoui_2f64_to_4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512F-NEXT: vzeroupper
@@ -401,7 +401,7 @@ define <4 x i32> @fptoui_2f64_to_4i32(<2 x double> %a) {
;
; AVX512DQ-LABEL: fptoui_2f64_to_4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -467,9 +467,9 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
;
; AVX512F-LABEL: fptoui_2f64_to_2i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -480,9 +480,9 @@ define <4 x i32> @fptoui_2f64_to_2i32(<2 x double> %a) {
;
; AVX512DQ-LABEL: fptoui_2f64_to_2i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -542,30 +542,30 @@ define <4 x i32> @fptoui_4f64_to_2i32(<2 x double> %a) {
;
; AVX512F-LABEL: fptoui_4f64_to_2i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_4f64_to_2i32:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512VL-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: fptoui_4f64_to_2i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_2i32:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512VLDQ-NEXT: vcvttpd2udq %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -736,9 +736,9 @@ define <4 x i64> @fptoui_4f64_to_4i64(<4 x double> %a) {
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2uqq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f64_to_4i64:
@@ -812,9 +812,9 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
;
; AVX512F-LABEL: fptoui_4f64_to_4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -826,9 +826,9 @@ define <4 x i32> @fptoui_4f64_to_4i32(<4 x double> %a) {
;
; AVX512DQ-LABEL: fptoui_4f64_to_4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvttpd2udq %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -980,16 +980,16 @@ define <2 x i64> @fptosi_4f32_to_2i64(<4 x float> %a) {
;
; AVX512DQ-LABEL: fptosi_4f32_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_2i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <4 x float> %a to <4 x i64>
@@ -1108,7 +1108,7 @@ define <4 x i64> @fptosi_4f32_to_4i64(<8 x float> %a) {
; AVX512DQ-LABEL: fptosi_4f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_4f32_to_4i64:
@@ -1216,13 +1216,13 @@ define <4 x i64> @fptosi_8f32_to_4i64(<8 x float> %a) {
; AVX512DQ-LABEL: fptosi_8f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptosi_8f32_to_4i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VLDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptosi <8 x float> %a to <8 x i64>
%shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1283,7 +1283,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
;
; AVX512F-LABEL: fptoui_2f32_to_2i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512F-NEXT: vzeroupper
@@ -1297,7 +1297,7 @@ define <2 x i32> @fptoui_2f32_to_2i32(<2 x float> %a) {
;
; AVX512DQ-LABEL: fptoui_2f32_to_2i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1351,9 +1351,9 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
;
; AVX512F-LABEL: fptoui_4f32_to_4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1364,9 +1364,9 @@ define <4 x i32> @fptoui_4f32_to_4i32(<4 x float> %a) {
;
; AVX512DQ-LABEL: fptoui_4f32_to_4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1535,16 +1535,16 @@ define <2 x i64> @fptoui_4f32_to_2i64(<4 x float> %a) {
;
; AVX512DQ-LABEL: fptoui_4f32_to_2i64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_2i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <4 x float> %a to <4 x i64>
@@ -1648,9 +1648,9 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
;
; AVX512F-LABEL: fptoui_8f32_to_8i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: fptoui_8f32_to_8i32:
@@ -1660,9 +1660,9 @@ define <8 x i32> @fptoui_8f32_to_8i32(<8 x float> %a) {
;
; AVX512DQ-LABEL: fptoui_8f32_to_8i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvttps2udq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_8i32:
@@ -1839,7 +1839,7 @@ define <4 x i64> @fptoui_4f32_to_4i64(<8 x float> %a) {
; AVX512DQ-LABEL: fptoui_4f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_4f32_to_4i64:
@@ -2017,13 +2017,13 @@ define <4 x i64> @fptoui_8f32_to_4i64(<8 x float> %a) {
; AVX512DQ-LABEL: fptoui_8f32_to_4i64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: fptoui_8f32_to_4i64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvttps2uqq %ymm0, %zmm0
-; AVX512VLDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VLDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VLDQ-NEXT: retq
%cvt = fptoui <8 x float> %a to <8 x i64>
%shuf = shufflevector <8 x i64> %cvt, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
diff --git a/test/CodeGen/X86/vec_ins_extract-1.ll b/test/CodeGen/X86/vec_ins_extract-1.ll
index 66dd74acf1b..949ef569f65 100644
--- a/test/CodeGen/X86/vec_ins_extract-1.ll
+++ b/test/CodeGen/X86/vec_ins_extract-1.ll
@@ -22,7 +22,7 @@ define i32 @t0(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
;
; X64-LABEL: t0:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl $76, -24(%rsp,%rdi,4)
@@ -51,7 +51,7 @@ define i32 @t1(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
;
; X64-LABEL: t1:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movl $76, %eax
; X64-NEXT: pinsrd $0, %eax, %xmm0
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -79,7 +79,7 @@ define <4 x i32> @t2(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
;
; X64-LABEL: t2:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: pinsrd $0, -24(%rsp,%rdi,4), %xmm0
@@ -106,7 +106,7 @@ define <4 x i32> @t3(i32 inreg %t7, <4 x i32> inreg %t8) nounwind {
;
; X64-LABEL: t3:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movss %xmm0, -24(%rsp,%rdi,4)
diff --git a/test/CodeGen/X86/vec_insert-4.ll b/test/CodeGen/X86/vec_insert-4.ll
index 674abbc39f7..06021659630 100644
--- a/test/CodeGen/X86/vec_insert-4.ll
+++ b/test/CodeGen/X86/vec_insert-4.ll
@@ -26,7 +26,7 @@ define <8 x float> @f(<8 x float> %a, i32 %b) nounwind {
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: andq $-32, %rsp
; X64-NEXT: subq $64, %rsp
-; X64-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: ## kill: def %edi killed %edi def %rdi
; X64-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
; X64-NEXT: movaps %xmm0, (%rsp)
; X64-NEXT: andl $7, %edi
diff --git a/test/CodeGen/X86/vec_insert-5.ll b/test/CodeGen/X86/vec_insert-5.ll
index 17d66f99674..d4a0c82e793 100644
--- a/test/CodeGen/X86/vec_insert-5.ll
+++ b/test/CodeGen/X86/vec_insert-5.ll
@@ -17,7 +17,7 @@ define void @t1(i32 %a, x86_mmx* %P) nounwind {
;
; X64-LABEL: t1:
; X64: # %bb.0:
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: shll $12, %edi
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
diff --git a/test/CodeGen/X86/vec_insert-8.ll b/test/CodeGen/X86/vec_insert-8.ll
index 71585474969..a421ff29263 100644
--- a/test/CodeGen/X86/vec_insert-8.ll
+++ b/test/CodeGen/X86/vec_insert-8.ll
@@ -23,7 +23,7 @@ define <4 x i32> @var_insert(<4 x i32> %x, i32 %val, i32 %idx) nounwind {
;
; X64-LABEL: var_insert:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
+; X64-NEXT: # kill: def %esi killed %esi def %rsi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %esi
; X64-NEXT: movl %edi, -24(%rsp,%rsi,4)
@@ -51,7 +51,7 @@ define i32 @var_extract(<4 x i32> %x, i32 %idx) nounwind {
;
; X64-LABEL: var_extract:
; X64: # %bb.0: # %entry
-; X64-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: # kill: def %edi killed %edi def %rdi
; X64-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl -24(%rsp,%rdi,4), %eax
diff --git a/test/CodeGen/X86/vec_insert-mmx.ll b/test/CodeGen/X86/vec_insert-mmx.ll
index 81bb25a1e74..39e21e90f01 100644
--- a/test/CodeGen/X86/vec_insert-mmx.ll
+++ b/test/CodeGen/X86/vec_insert-mmx.ll
@@ -16,7 +16,7 @@ define x86_mmx @t0(i32 %A) nounwind {
;
; X64-LABEL: t0:
; X64: ## %bb.0:
-; X64-NEXT: ## kill: %edi<def> %edi<kill> %rdi<def>
+; X64-NEXT: ## kill: def %edi killed %edi def %rdi
; X64-NEXT: movq %rdi, %xmm0
; X64-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
diff --git a/test/CodeGen/X86/vec_int_to_fp.ll b/test/CodeGen/X86/vec_int_to_fp.ll
index 0ab320c63aa..30ba7276043 100644
--- a/test/CodeGen/X86/vec_int_to_fp.ll
+++ b/test/CodeGen/X86/vec_int_to_fp.ll
@@ -58,9 +58,9 @@ define <2 x double> @sitofp_2i64_to_2f64(<2 x i64> %a) {
;
; AVX512DQ-LABEL: sitofp_2i64_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -96,7 +96,7 @@ define <2 x double> @sitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX-LABEL: sitofp_4i32_to_2f64:
; AVX: # %bb.0:
; AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%cvt = sitofp <4 x i32> %a to <4 x double>
@@ -134,7 +134,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -142,7 +142,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -150,7 +150,7 @@ define <2 x double> @sitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
@@ -190,7 +190,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -198,7 +198,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -206,7 +206,7 @@ define <2 x double> @sitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
@@ -301,9 +301,9 @@ define <4 x double> @sitofp_4i64_to_4f64(<4 x i64> %a) {
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f64:
@@ -377,7 +377,7 @@ define <4 x double> @sitofp_8i16_to_4f64(<8 x i16> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -432,7 +432,7 @@ define <4 x double> @sitofp_16i8_to_4f64(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -492,9 +492,9 @@ define <2 x double> @uitofp_2i64_to_2f64(<2 x i64> %a) {
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -531,9 +531,9 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
;
; AVX512F-LABEL: uitofp_2i32_to_2f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -544,9 +544,9 @@ define <2 x double> @uitofp_2i32_to_2f64(<4 x i32> %a) {
;
; AVX512DQ-LABEL: uitofp_2i32_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -580,7 +580,7 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -594,37 +594,37 @@ define <2 x double> @uitofp_4i32_to_2f64(<4 x i32> %a) {
; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX2-NEXT: vaddpd %ymm0, %ymm1, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: uitofp_4i32_to_2f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_2f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vcvtudq2pd %xmm0, %ymm0
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512DQ-LABEL: uitofp_4i32_to_2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_2f64:
; AVX512VLDQ: # %bb.0:
; AVX512VLDQ-NEXT: vcvtudq2pd %xmm0, %ymm0
-; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
%cvt = uitofp <4 x i32> %a to <4 x double>
@@ -662,7 +662,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -670,7 +670,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -678,7 +678,7 @@ define <2 x double> @uitofp_8i16_to_2f64(<8 x i16> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
@@ -718,7 +718,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -726,7 +726,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2pd %xmm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -734,7 +734,7 @@ define <2 x double> @uitofp_16i8_to_2f64(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
@@ -823,9 +823,9 @@ define <4 x double> @uitofp_4i64_to_4f64(<4 x i64> %a) {
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f64:
@@ -883,9 +883,9 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
;
; AVX512F-LABEL: uitofp_4i32_to_4f64:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_4i32_to_4f64:
@@ -895,9 +895,9 @@ define <4 x double> @uitofp_4i32_to_4f64(<4 x i32> %a) {
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i32_to_4f64:
@@ -956,7 +956,7 @@ define <4 x double> @uitofp_8i16_to_4f64(<8 x i16> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x double>
%shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1013,7 +1013,7 @@ define <4 x double> @uitofp_16i8_to_4f64(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2pd %ymm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x double>
%shuf = shufflevector <16 x double> %cvt, <16 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -1072,9 +1072,9 @@ define <4 x float> @sitofp_2i64_to_4f32(<2 x i64> %a) {
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1131,7 +1131,7 @@ define <4 x float> @sitofp_2i64_to_4f32_zero(<2 x i64> %a) {
;
; AVX512DQ-LABEL: sitofp_2i64_to_4f32_zero:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1197,15 +1197,15 @@ define <4 x float> @sitofp_4i64_to_4f32_undef(<2 x i64> %a) {
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512VLDQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1261,7 +1261,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1269,7 +1269,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1277,7 +1277,7 @@ define <4 x float> @sitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <8 x i16> %a to <8 x float>
@@ -1320,7 +1320,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX1-NEXT: vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1328,7 +1328,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovsxbd %xmm0, %ymm0
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1336,7 +1336,7 @@ define <4 x float> @sitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
@@ -1436,9 +1436,9 @@ define <4 x float> @sitofp_4i64_to_4f32(<4 x i64> %a) {
;
; AVX512DQ-LABEL: sitofp_4i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1576,7 +1576,7 @@ define <8 x float> @sitofp_16i8_to_8f32(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%cvt = sitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1691,9 +1691,9 @@ define <4 x float> @uitofp_2i64_to_4f32(<2 x i64> %a) {
;
; AVX512DQ-LABEL: uitofp_2i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -1800,7 +1800,7 @@ define <4 x float> @uitofp_2i64_to_2f32(<2 x i64> %a) {
;
; AVX512DQ-LABEL: uitofp_2i64_to_2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512DQ-NEXT: vzeroupper
@@ -1927,15 +1927,15 @@ define <4 x float> @uitofp_4i64_to_4f32_undef(<2 x i64> %a) {
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_4i64_to_4f32_undef:
; AVX512VLDQ: # %bb.0:
-; AVX512VLDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512VLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512VLDQ-NEXT: vcvtuqq2ps %ymm0, %xmm0
; AVX512VLDQ-NEXT: vzeroupper
; AVX512VLDQ-NEXT: retq
@@ -1979,9 +1979,9 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
;
; AVX512F-LABEL: uitofp_4i32_to_4f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1992,9 +1992,9 @@ define <4 x float> @uitofp_4i32_to_4f32(<4 x i32> %a) {
;
; AVX512DQ-LABEL: uitofp_4i32_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2039,7 +2039,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2047,7 +2047,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2055,7 +2055,7 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <8 x i16> %a to <8 x float>
@@ -2098,7 +2098,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -2106,7 +2106,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT: vcvtdq2ps %ymm0, %ymm0
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2114,7 +2114,7 @@ define <4 x float> @uitofp_16i8_to_4f32(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
@@ -2361,9 +2361,9 @@ define <4 x float> @uitofp_4i64_to_4f32(<4 x i64> %a) {
;
; AVX512DQ-LABEL: uitofp_4i64_to_4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2425,9 +2425,9 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
;
; AVX512F-LABEL: uitofp_8i32_to_8f32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_8i32_to_8f32:
@@ -2437,9 +2437,9 @@ define <8 x float> @uitofp_8i32_to_8f32(<8 x i32> %a) {
;
; AVX512DQ-LABEL: uitofp_8i32_to_8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_8i32_to_8f32:
@@ -2556,7 +2556,7 @@ define <8 x float> @uitofp_16i8_to_8f32(<16 x i8> %a) {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; AVX512-NEXT: vcvtdq2ps %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%cvt = uitofp <16 x i8> %a to <16 x float>
%shuf = shufflevector <16 x float> %cvt, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2614,7 +2614,7 @@ define <2 x double> @sitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2778,7 +2778,7 @@ define <4 x double> @sitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: sitofp_load_4i64_to_4f64:
@@ -2910,7 +2910,7 @@ define <2 x double> @uitofp_load_2i64_to_2f64(<2 x i64> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2952,7 +2952,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2965,7 +2965,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3108,7 +3108,7 @@ define <4 x double> @uitofp_load_4i64_to_4f64(<4 x i64> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i64_to_4f64:
@@ -3172,7 +3172,7 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_4i32_to_4f64:
@@ -3184,7 +3184,7 @@ define <4 x double> @uitofp_load_4i32_to_4f64(<4 x i32> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_4i32_to_4f64:
@@ -3342,7 +3342,7 @@ define <4 x float> @sitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3933,7 +3933,7 @@ define <4 x float> @uitofp_load_4i64_to_4f32(<4 x i64> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -3986,7 +3986,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %xmm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -3999,7 +3999,7 @@ define <4 x float> @uitofp_load_4i32_to_4f32(<4 x i32> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %xmm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -4575,7 +4575,7 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovaps (%rdi), %ymm0
; AVX512F-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: uitofp_load_8i32_to_8f32:
@@ -4587,7 +4587,7 @@ define <8 x float> @uitofp_load_8i32_to_8f32(<8 x i32> *%a) {
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovaps (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtudq2ps %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512DQ-NEXT: retq
;
; AVX512VLDQ-LABEL: uitofp_load_8i32_to_8f32:
diff --git a/test/CodeGen/X86/vec_minmax_sint.ll b/test/CodeGen/X86/vec_minmax_sint.ll
index 67887dbe8fc..df1699aa00d 100644
--- a/test/CodeGen/X86/vec_minmax_sint.ll
+++ b/test/CodeGen/X86/vec_minmax_sint.ll
@@ -72,10 +72,10 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: max_gt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <2 x i64> %a, %b
@@ -183,10 +183,10 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: max_gt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -479,10 +479,10 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: max_ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sge <2 x i64> %a, %b
@@ -608,10 +608,10 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: max_ge_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmaxsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp sge <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -905,10 +905,10 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: min_lt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp slt <2 x i64> %a, %b
@@ -1017,10 +1017,10 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: min_lt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp slt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -1306,10 +1306,10 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: min_le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sle <2 x i64> %a, %b
@@ -1434,10 +1434,10 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: min_le_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpminsq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp sle <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
diff --git a/test/CodeGen/X86/vec_minmax_uint.ll b/test/CodeGen/X86/vec_minmax_uint.ll
index cf764a2f346..294d10c1cee 100644
--- a/test/CodeGen/X86/vec_minmax_uint.ll
+++ b/test/CodeGen/X86/vec_minmax_uint.ll
@@ -82,10 +82,10 @@ define <2 x i64> @max_gt_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: max_gt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ugt <2 x i64> %a, %b
@@ -208,10 +208,10 @@ define <4 x i64> @max_gt_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: max_gt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp ugt <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -526,10 +526,10 @@ define <2 x i64> @max_ge_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: max_ge_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp uge <2 x i64> %a, %b
@@ -669,10 +669,10 @@ define <4 x i64> @max_ge_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: max_ge_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmaxuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp uge <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -980,10 +980,10 @@ define <2 x i64> @min_lt_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: min_lt_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ult <2 x i64> %a, %b
@@ -1106,10 +1106,10 @@ define <4 x i64> @min_lt_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: min_lt_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp ult <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
@@ -1423,10 +1423,10 @@ define <2 x i64> @min_le_v2i64(<2 x i64> %a, <2 x i64> %b) {
;
; AVX512-LABEL: min_le_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp ule <2 x i64> %a, %b
@@ -1566,10 +1566,10 @@ define <4 x i64> @min_le_v4i64(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512-LABEL: min_le_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpminuq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%1 = icmp ule <4 x i64> %a, %b
%2 = select <4 x i1> %1, <4 x i64> %a, <4 x i64> %b
diff --git a/test/CodeGen/X86/vec_ss_load_fold.ll b/test/CodeGen/X86/vec_ss_load_fold.ll
index ef8afbe934e..87634a9c708 100644
--- a/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -17,7 +17,7 @@ define i16 @test1(float %f) nounwind {
; X32-NEXT: minss LCPI0_2, %xmm0
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: ## kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test1:
@@ -29,7 +29,7 @@ define i16 @test1(float %f) nounwind {
; X64-NEXT: minss {{.*}}(%rip), %xmm0
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
;
; X32_AVX1-LABEL: test1:
@@ -42,7 +42,7 @@ define i16 @test1(float %f) nounwind {
; X32_AVX1-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32_AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X32_AVX1-NEXT: retl
;
; X64_AVX1-LABEL: test1:
@@ -54,7 +54,7 @@ define i16 @test1(float %f) nounwind {
; X64_AVX1-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX1-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX1-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64_AVX1-NEXT: ## kill: def %ax killed %ax killed %eax
; X64_AVX1-NEXT: retq
;
; X32_AVX512-LABEL: test1:
@@ -67,7 +67,7 @@ define i16 @test1(float %f) nounwind {
; X32_AVX512-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32_AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X32_AVX512-NEXT: retl
;
; X64_AVX512-LABEL: test1:
@@ -79,7 +79,7 @@ define i16 @test1(float %f) nounwind {
; X64_AVX512-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX512-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX512-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64_AVX512-NEXT: ## kill: def %ax killed %ax killed %eax
; X64_AVX512-NEXT: retq
%tmp = insertelement <4 x float> undef, float %f, i32 0 ; <<4 x float>> [#uses=1]
%tmp10 = insertelement <4 x float> %tmp, float 0.000000e+00, i32 1 ; <<4 x float>> [#uses=1]
@@ -104,7 +104,7 @@ define i16 @test2(float %f) nounwind {
; X32-NEXT: xorps %xmm1, %xmm1
; X32-NEXT: maxss %xmm1, %xmm0
; X32-NEXT: cvttss2si %xmm0, %eax
-; X32-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32-NEXT: ## kill: def %ax killed %ax killed %eax
; X32-NEXT: retl
;
; X64-LABEL: test2:
@@ -115,7 +115,7 @@ define i16 @test2(float %f) nounwind {
; X64-NEXT: xorps %xmm1, %xmm1
; X64-NEXT: maxss %xmm1, %xmm0
; X64-NEXT: cvttss2si %xmm0, %eax
-; X64-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64-NEXT: ## kill: def %ax killed %ax killed %eax
; X64-NEXT: retq
;
; X32_AVX-LABEL: test2:
@@ -127,7 +127,7 @@ define i16 @test2(float %f) nounwind {
; X32_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X32_AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X32_AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X32_AVX-NEXT: retl
;
; X64_AVX-LABEL: test2:
@@ -138,7 +138,7 @@ define i16 @test2(float %f) nounwind {
; X64_AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX-NEXT: vcvttss2si %xmm0, %eax
-; X64_AVX-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; X64_AVX-NEXT: ## kill: def %ax killed %ax killed %eax
; X64_AVX-NEXT: retq
%tmp28 = fsub float %f, 1.000000e+00 ; <float> [#uses=1]
%tmp37 = fmul float %tmp28, 5.000000e-01 ; <float> [#uses=1]
diff --git a/test/CodeGen/X86/vector-bitreverse.ll b/test/CodeGen/X86/vector-bitreverse.ll
index 51b5219f302..1e8a693054d 100644
--- a/test/CodeGen/X86/vector-bitreverse.ll
+++ b/test/CodeGen/X86/vector-bitreverse.ll
@@ -53,7 +53,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vpextrb $0, %xmm0, %eax
-; XOP-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; XOP-NEXT: # kill: def %al killed %al killed %eax
; XOP-NEXT: retq
%b = call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %b
@@ -62,7 +62,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-LABEL: test_bitreverse_i16:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE-NEXT: # kill: def %edi killed %edi def %rdi
; SSE-NEXT: rolw $8, %di
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -80,12 +80,12 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; SSE-NEXT: andl $43690, %eax # imm = 0xAAAA
; SSE-NEXT: shrl %eax
; SSE-NEXT: leal (%rax,%rcx,2), %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_bitreverse_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: rolw $8, %di
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $3855, %eax # imm = 0xF0F
@@ -103,7 +103,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; AVX-NEXT: andl $43690, %eax # imm = 0xAAAA
; AVX-NEXT: shrl %eax
; AVX-NEXT: leal (%rax,%rcx,2), %eax
-; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-NEXT: # kill: def %ax killed %ax killed %eax
; AVX-NEXT: retq
;
; XOP-LABEL: test_bitreverse_i16:
@@ -111,7 +111,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; XOP-NEXT: vmovd %edi, %xmm0
; XOP-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
; XOP-NEXT: vmovd %xmm0, %eax
-; XOP-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; XOP-NEXT: # kill: def %ax killed %ax killed %eax
; XOP-NEXT: retq
%b = call i16 @llvm.bitreverse.i16(i16 %a)
ret i16 %b
@@ -120,7 +120,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
define i32 @test_bitreverse_i32(i32 %a) nounwind {
; SSE-LABEL: test_bitreverse_i32:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE-NEXT: # kill: def %edi killed %edi def %rdi
; SSE-NEXT: bswapl %edi
; SSE-NEXT: movl %edi, %eax
; SSE-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
@@ -142,7 +142,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
;
; AVX-LABEL: test_bitreverse_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: bswapl %edi
; AVX-NEXT: movl %edi, %eax
; AVX-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
diff --git a/test/CodeGen/X86/vector-compare-all_of.ll b/test/CodeGen/X86/vector-compare-all_of.ll
index e45c88837c6..a055307c5df 100644
--- a/test/CodeGen/X86/vector-compare-all_of.ll
+++ b/test/CodeGen/X86/vector-compare-all_of.ll
@@ -624,7 +624,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
@@ -635,7 +635,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-NEXT: # kill: def %ax killed %ax killed %eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
@@ -649,7 +649,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
@@ -674,7 +674,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
@@ -692,7 +692,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -703,7 +703,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -720,7 +720,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -748,7 +748,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
@@ -763,7 +763,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX1-NEXT: movl $-1, %eax
; AVX1-NEXT: cmovnel %ecx, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -777,7 +777,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX2-NEXT: movl $-1, %eax
; AVX2-NEXT: cmovnel %ecx, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -795,7 +795,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -822,7 +822,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
@@ -833,7 +833,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; AVX-NEXT: movl $-1, %eax
; AVX-NEXT: cmovnel %ecx, %eax
-; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX-NEXT: # kill: def %al killed %al killed %eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
@@ -849,7 +849,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %al killed %al killed %eax
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
@@ -876,7 +876,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; SSE-NEXT: movl $-1, %eax
; SSE-NEXT: cmovnel %ecx, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
@@ -896,7 +896,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -907,7 +907,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: cmpl $-1, %ecx
; AVX2-NEXT: cmovel %ecx, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -926,7 +926,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %al killed %al killed %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
diff --git a/test/CodeGen/X86/vector-compare-any_of.ll b/test/CodeGen/X86/vector-compare-any_of.ll
index d49e4b7ae8b..54d01461c14 100644
--- a/test/CodeGen/X86/vector-compare-any_of.ll
+++ b/test/CodeGen/X86/vector-compare-any_of.ll
@@ -578,7 +578,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8i16_sext:
@@ -587,7 +587,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX-NEXT: # kill: def %ax killed %ax killed %eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8i16_sext:
@@ -601,7 +601,7 @@ define i16 @test_v8i16_sext(<8 x i16> %a0, <8 x i16> %a1) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512-NEXT: retq
%c = icmp sgt <8 x i16> %a0, %a1
%s = sext <8 x i1> %c to <8 x i16>
@@ -624,7 +624,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_sext:
@@ -642,7 +642,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vmovd %xmm0, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -652,7 +652,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -669,7 +669,7 @@ define i16 @test_v16i16_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -695,7 +695,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; SSE-NEXT: # kill: def %ax killed %ax killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v16i16_legal_sext:
@@ -708,7 +708,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: negl %eax
; AVX1-NEXT: sbbl %eax, %eax
-; AVX1-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %ax killed %ax killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -720,7 +720,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %ax killed %ax killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -738,7 +738,7 @@ define i16 @test_v16i16_legal_sext(<16 x i16> %a0, <16 x i16> %a1) {
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
; AVX512-NEXT: movsbl %al, %eax
-; AVX512-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <16 x i16> %a0, %a1
@@ -763,7 +763,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16i8_sext:
@@ -772,7 +772,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; AVX-NEXT: vpmovmskb %xmm0, %eax
; AVX-NEXT: negl %eax
; AVX-NEXT: sbbl %eax, %eax
-; AVX-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX-NEXT: # kill: def %al killed %al killed %eax
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16i8_sext:
@@ -788,7 +788,7 @@ define i8 @test_v16i8_sext(<16 x i8> %a0, <16 x i8> %a1) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %al killed %al killed %eax
; AVX512-NEXT: retq
%c = icmp sgt <16 x i8> %a0, %a1
%s = sext <16 x i1> %c to <16 x i8>
@@ -813,7 +813,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: negl %eax
; SSE-NEXT: sbbl %eax, %eax
-; SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; SSE-NEXT: # kill: def %al killed %al killed %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: test_v32i8_sext:
@@ -833,7 +833,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpextrb $0, %xmm0, %eax
-; AVX1-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX1-NEXT: # kill: def %al killed %al killed %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -843,7 +843,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: negl %eax
; AVX2-NEXT: sbbl %eax, %eax
-; AVX2-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX2-NEXT: # kill: def %al killed %al killed %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -862,7 +862,7 @@ define i8 @test_v32i8_sext(<32 x i8> %a0, <32 x i8> %a1) {
; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpextrb $0, %xmm0, %eax
-; AVX512-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512-NEXT: # kill: def %al killed %al killed %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%c = icmp sgt <32 x i8> %a0, %a1
diff --git a/test/CodeGen/X86/vector-compare-results.ll b/test/CodeGen/X86/vector-compare-results.ll
index 6ac0c7b3d33..df885abd5f7 100644
--- a/test/CodeGen/X86/vector-compare-results.ll
+++ b/test/CodeGen/X86/vector-compare-results.ll
@@ -145,7 +145,7 @@ define <4 x i1> @test_cmp_v4f64(<4 x double> %a0, <4 x double> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <4 x double> %a0, %a1
@@ -181,7 +181,7 @@ define <8 x i1> @test_cmp_v8f32(<8 x float> %a0, <8 x float> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fcmp ogt <8 x float> %a0, %a1
@@ -244,7 +244,7 @@ define <4 x i1> @test_cmp_v4i64(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <4 x i64> %a0, %a1
@@ -281,7 +281,7 @@ define <8 x i1> @test_cmp_v8i32(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = icmp sgt <8 x i32> %a0, %a1
@@ -334,7 +334,7 @@ define <16 x i1> @test_cmp_v16i16(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i16> %a0, %a1
@@ -617,7 +617,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512F-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -626,7 +626,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -634,7 +634,7 @@ define <8 x i1> @test_cmp_v8f64(<8 x double> %a0, <8 x double> %a1) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <8 x double> %a0, %a1
@@ -697,7 +697,7 @@ define <16 x i1> @test_cmp_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x float> %a0, %a1
@@ -796,7 +796,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512F-NEXT: vpcmpgtq %zmm1, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -805,7 +805,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -813,7 +813,7 @@ define <8 x i1> @test_cmp_v8i64(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <8 x i64> %a0, %a1
@@ -879,7 +879,7 @@ define <16 x i1> @test_cmp_v16i32(<16 x i32> %a0, <16 x i32> %a1) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i32> %a0, %a1
@@ -1150,7 +1150,7 @@ define <32 x i1> @test_cmp_v32i16(<32 x i16> %a0, <32 x i16> %a1) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpgtw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i16> %a0, %a1
ret <32 x i1> %1
@@ -1977,7 +1977,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vmovdqa %xmm4, %xmm2
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -1988,7 +1988,7 @@ define <64 x i1> @test_cmp_v64i8(<64 x i8> %a0, <64 x i8> %a1) nounwind {
; AVX512DQ-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm4, %xmm3
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vmovdqa %xmm4, %xmm2
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
@@ -2098,7 +2098,7 @@ define <16 x i1> @test_cmp_v16f64(<16 x double> %a0, <16 x double> %a1) nounwind
; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = fcmp ogt <16 x double> %a0, %a1
@@ -2661,7 +2661,7 @@ define <32 x i1> @test_cmp_v32f32(<32 x float> %a0, <32 x float> %a1) nounwind {
; AVX512BW-NEXT: vcmpltps %zmm1, %zmm3, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x float> %a0, %a1
ret <32 x i1> %1
@@ -2857,7 +2857,7 @@ define <16 x i1> @test_cmp_v16i64(<16 x i64> %a0, <16 x i64> %a1) nounwind {
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = icmp sgt <16 x i64> %a0, %a1
@@ -3412,7 +3412,7 @@ define <32 x i1> @test_cmp_v32i32(<32 x i32> %a0, <32 x i32> %a1) nounwind {
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i32> %a0, %a1
ret <32 x i1> %1
@@ -4539,8 +4539,8 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX512F-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
-; AVX512F-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512F-NEXT: # kill: def %xmm2 killed %xmm2 killed %ymm2
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -4826,8 +4826,8 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX512DQ-NEXT: vpcmpgtb %ymm0, %ymm6, %ymm0
; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512DQ-NEXT: vextracti128 $1, %ymm2, %xmm3
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
-; AVX512DQ-NEXT: # kill: %xmm2<def> %xmm2<kill> %ymm2<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def %xmm2 killed %xmm2 killed %ymm2
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -7284,7 +7284,7 @@ define <32 x i1> @test_cmp_v32f64(<32 x double> %a0, <32 x double> %a1) nounwind
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
%1 = fcmp ogt <32 x double> %a0, %a1
ret <32 x i1> %1
@@ -8169,7 +8169,7 @@ define <32 x i1> @test_cmp_v32i64(<32 x i64> %a0, <32 x i64> %a1) nounwind {
; AVX512BW-NEXT: kunpckbw %k1, %k2, %k1
; AVX512BW-NEXT: kunpckwd %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
%1 = icmp sgt <32 x i64> %a0, %a1
ret <32 x i1> %1
diff --git a/test/CodeGen/X86/vector-extend-inreg.ll b/test/CodeGen/X86/vector-extend-inreg.ll
index 6741e2abb11..8d55e5da05d 100644
--- a/test/CodeGen/X86/vector-extend-inreg.ll
+++ b/test/CodeGen/X86/vector-extend-inreg.ll
@@ -47,7 +47,7 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X64-SSE-NEXT: movq %rsp, %rbp
; X64-SSE-NEXT: andq $-128, %rsp
; X64-SSE-NEXT: subq $256, %rsp # imm = 0x100
-; X64-SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-SSE-NEXT: # kill: def %edi killed %edi def %rdi
; X64-SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero
; X64-SSE-NEXT: xorps %xmm0, %xmm0
; X64-SSE-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
@@ -99,7 +99,7 @@ define i64 @extract_any_extend_vector_inreg_v16i64(<16 x i64> %a0, i32 %a1) noun
; X64-AVX-NEXT: movq %rsp, %rbp
; X64-AVX-NEXT: andq $-128, %rsp
; X64-AVX-NEXT: subq $256, %rsp # imm = 0x100
-; X64-AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; X64-AVX-NEXT: # kill: def %edi killed %edi def %rdi
; X64-AVX-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[3,1,2,3]
; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
diff --git a/test/CodeGen/X86/vector-half-conversions.ll b/test/CodeGen/X86/vector-half-conversions.ll
index 0df55fee6ee..44fe38fa86b 100644
--- a/test/CodeGen/X86/vector-half-conversions.ll
+++ b/test/CodeGen/X86/vector-half-conversions.ll
@@ -28,7 +28,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -55,7 +55,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -82,7 +82,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -109,7 +109,7 @@ define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -140,7 +140,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -166,7 +166,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -192,7 +192,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -220,7 +220,7 @@ define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -252,7 +252,7 @@ define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
; ALL-NEXT: movq %rdx, %r8
; ALL-NEXT: movq %rdx, %r10
; ALL-NEXT: movswl %dx, %r9d
-; ALL-NEXT: # kill: %edx<def> %edx<kill> %rdx<kill>
+; ALL-NEXT: # kill: def %edx killed %edx killed %rdx
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: shrq $32, %r8
; ALL-NEXT: shrq $48, %r10
@@ -260,7 +260,7 @@ define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
; ALL-NEXT: movq %rdi, %rax
; ALL-NEXT: movq %rdi, %rsi
; ALL-NEXT: movswl %di, %ecx
-; ALL-NEXT: # kill: %edi<def> %edi<kill> %rdi<kill>
+; ALL-NEXT: # kill: def %edi killed %edi killed %rdi
; ALL-NEXT: shrl $16, %edi
; ALL-NEXT: shrq $32, %rax
; ALL-NEXT: shrq $48, %rsi
@@ -313,7 +313,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm9
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm10
@@ -328,7 +328,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm13
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm14
@@ -343,7 +343,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm3
; AVX1-NEXT: movswl %ax, %ecx
-; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm4
@@ -408,7 +408,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm9
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm10
@@ -423,7 +423,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm13
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm14
@@ -438,7 +438,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm3
; AVX2-NEXT: movswl %ax, %ecx
-; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm4
@@ -503,7 +503,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm9
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm11
@@ -518,7 +518,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm14
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm15
@@ -533,7 +533,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512F-NEXT: movswl %cx, %ecx
; AVX512F-NEXT: vmovd %ecx, %xmm1
; AVX512F-NEXT: movswl %ax, %ecx
-; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: cwtl
; AVX512F-NEXT: vmovd %eax, %xmm4
@@ -599,7 +599,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm9
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm11
@@ -614,7 +614,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm14
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm15
@@ -629,7 +629,7 @@ define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
; AVX512VL-NEXT: movswl %cx, %ecx
; AVX512VL-NEXT: vmovd %ecx, %xmm18
; AVX512VL-NEXT: movswl %ax, %ecx
-; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: cwtl
; AVX512VL-NEXT: vmovd %eax, %xmm19
@@ -735,7 +735,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswl %ax, %esi
-; AVX1-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX1-NEXT: # kill: def %eax killed %eax killed %rax
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: shrq $48, %rdx
@@ -761,7 +761,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswl %ax, %esi
-; AVX2-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX2-NEXT: # kill: def %eax killed %eax killed %rax
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: shrq $48, %rdx
@@ -787,7 +787,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX512F-NEXT: movq %rax, %rcx
; AVX512F-NEXT: movq %rax, %rdx
; AVX512F-NEXT: movswl %ax, %esi
-; AVX512F-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512F-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512F-NEXT: shrl $16, %eax
; AVX512F-NEXT: shrq $32, %rcx
; AVX512F-NEXT: shrq $48, %rdx
@@ -815,7 +815,7 @@ define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) nounwind {
; AVX512VL-NEXT: movq %rax, %rcx
; AVX512VL-NEXT: movq %rax, %rdx
; AVX512VL-NEXT: movswl %ax, %esi
-; AVX512VL-NEXT: # kill: %eax<def> %eax<kill> %rax<kill>
+; AVX512VL-NEXT: # kill: def %eax killed %eax killed %rax
; AVX512VL-NEXT: shrl $16, %eax
; AVX512VL-NEXT: shrq $32, %rcx
; AVX512VL-NEXT: shrq $48, %rdx
@@ -2061,7 +2061,7 @@ define i16 @cvt_f32_to_i16(float %a0) nounwind {
; ALL: # %bb.0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; ALL-NEXT: # kill: def %ax killed %ax killed %eax
; ALL-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
@@ -3139,7 +3139,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3176,7 +3176,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3213,7 +3213,7 @@ define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
@@ -3255,7 +3255,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3293,7 +3293,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3331,7 +3331,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
@@ -3369,7 +3369,7 @@ define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
@@ -3414,7 +3414,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
@@ -3452,7 +3452,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
@@ -3490,7 +3490,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
@@ -3528,7 +3528,7 @@ define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
@@ -3577,7 +3577,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
@@ -3602,7 +3602,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
@@ -3644,7 +3644,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
@@ -3669,7 +3669,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
@@ -3710,7 +3710,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
@@ -3738,7 +3738,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
@@ -3836,7 +3836,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
@@ -3874,7 +3874,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
@@ -3912,7 +3912,7 @@ define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) nounwind {
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
@@ -3949,7 +3949,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
@@ -3991,7 +3991,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
@@ -4033,7 +4033,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
@@ -4075,7 +4075,7 @@ define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) noun
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
@@ -4125,7 +4125,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
@@ -4167,7 +4167,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
@@ -4209,7 +4209,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
@@ -4251,7 +4251,7 @@ define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) nounw
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
@@ -4324,7 +4324,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r13d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
@@ -4332,7 +4332,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
@@ -4392,7 +4392,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r13d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
@@ -4400,7 +4400,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
@@ -4462,7 +4462,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r13d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
@@ -4470,7 +4470,7 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) nounwind {
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll
index 59f8fe4e103..4abace0d938 100644
--- a/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -233,9 +233,9 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
;
; AVX512CD-LABEL: testv2i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -499,9 +499,9 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
;
; AVX512CD-LABEL: testv2i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -747,9 +747,9 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
;
; AVX512CD-LABEL: testv4i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
@@ -989,9 +989,9 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
;
; AVX512CD-LABEL: testv4i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512CD-NEXT: vzeroupper
; AVX512CD-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-lzcnt-256.ll b/test/CodeGen/X86/vector-lzcnt-256.ll
index 55f797a2cc1..73f7b3c2ad8 100644
--- a/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -162,9 +162,9 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
;
; AVX512CD-LABEL: testv4i64:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64:
@@ -354,9 +354,9 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
;
; AVX512CD-LABEL: testv4i64u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512CD-NEXT: vplzcntq %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv4i64u:
@@ -521,9 +521,9 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
;
; AVX512CD-LABEL: testv8i32:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32:
@@ -683,9 +683,9 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
;
; AVX512CD-LABEL: testv8i32u:
; AVX512CD: # %bb.0:
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512CD-NEXT: vplzcntd %zmm0, %zmm0
-; AVX512CD-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512CD-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512CD-NEXT: retq
;
; X32-AVX-LABEL: testv8i32u:
diff --git a/test/CodeGen/X86/vector-popcnt-128.ll b/test/CodeGen/X86/vector-popcnt-128.ll
index c316121e24e..688285889b7 100644
--- a/test/CodeGen/X86/vector-popcnt-128.ll
+++ b/test/CodeGen/X86/vector-popcnt-128.ll
@@ -114,9 +114,9 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv2i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -284,9 +284,9 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv4i32:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -450,9 +450,9 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
;
; BITALG_NOVLX-LABEL: testv8i16:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -567,9 +567,9 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
;
; BITALG_NOVLX-LABEL: testv16i8:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-popcnt-256.ll b/test/CodeGen/X86/vector-popcnt-256.ll
index 48d16601e4f..ee03ab705eb 100644
--- a/test/CodeGen/X86/vector-popcnt-256.ll
+++ b/test/CodeGen/X86/vector-popcnt-256.ll
@@ -45,9 +45,9 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv4i64:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64:
@@ -133,9 +133,9 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
;
; AVX512VPOPCNTDQ-LABEL: testv8i32:
; AVX512VPOPCNTDQ: # %bb.0:
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32:
@@ -228,9 +228,9 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
;
; BITALG_NOVLX-LABEL: testv16i16:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
@@ -288,9 +288,9 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
;
; BITALG_NOVLX-LABEL: testv32i8:
; BITALG_NOVLX: # %bb.0:
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
diff --git a/test/CodeGen/X86/vector-rotate-128.ll b/test/CodeGen/X86/vector-rotate-128.ll
index 8b2fbf7c0b1..b40c9eddd46 100644
--- a/test/CodeGen/X86/vector-rotate-128.ll
+++ b/test/CodeGen/X86/vector-rotate-128.ll
@@ -78,10 +78,10 @@ define <2 x i64> @var_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512BW-LABEL: var_rotate_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -219,10 +219,10 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; AVX512BW-LABEL: var_rotate_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -462,8 +462,8 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
;
; AVX512BW-LABEL: var_rotate_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %xmm1, %xmm2, %xmm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -838,10 +838,10 @@ define <2 x i64> @constant_rotate_v2i64(<2 x i64> %a) nounwind {
;
; AVX512BW-LABEL: constant_rotate_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,14]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -940,10 +940,10 @@ define <4 x i32> @constant_rotate_v4i32(<4 x i32> %a) nounwind {
;
; AVX512BW-LABEL: constant_rotate_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [4,5,6,7]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1067,7 +1067,7 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
;
; AVX512BW-LABEL: constant_rotate_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [16,15,14,13,12,11,10,9]
@@ -1350,9 +1350,9 @@ define <2 x i64> @splatconstant_rotate_v2i64(<2 x i64> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1397,9 +1397,9 @@ define <4 x i32> @splatconstant_rotate_v4i32(<4 x i32> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1535,7 +1535,7 @@ define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v2i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -1587,7 +1587,7 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/vector-rotate-256.ll b/test/CodeGen/X86/vector-rotate-256.ll
index 954d0b0f31c..46bac267185 100644
--- a/test/CodeGen/X86/vector-rotate-256.ll
+++ b/test/CodeGen/X86/vector-rotate-256.ll
@@ -50,10 +50,10 @@ define <4 x i64> @var_rotate_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX512BW-LABEL: var_rotate_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v4i64:
@@ -141,10 +141,10 @@ define <8 x i32> @var_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
;
; AVX512BW-LABEL: var_rotate_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v8i32:
@@ -271,8 +271,8 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
;
; AVX512BW-LABEL: var_rotate_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512BW-NEXT: vpsubw %ymm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
@@ -479,10 +479,10 @@ define <4 x i64> @constant_rotate_v4i64(<4 x i64> %a) nounwind {
;
; AVX512BW-LABEL: constant_rotate_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,14,50,60]
; AVX512BW-NEXT: vprolvq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v4i64:
@@ -545,10 +545,10 @@ define <8 x i32> @constant_rotate_v8i32(<8 x i32> %a) nounwind {
;
; AVX512BW-LABEL: constant_rotate_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [4,5,6,7,8,9,10,11]
; AVX512BW-NEXT: vprolvd %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v8i32:
@@ -623,7 +623,7 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
;
; AVX512BW-LABEL: constant_rotate_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
@@ -800,9 +800,9 @@ define <4 x i64> @splatconstant_rotate_v4i64(<4 x i64> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vprolq $14, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v4i64:
@@ -853,9 +853,9 @@ define <8 x i32> @splatconstant_rotate_v8i32(<8 x i32> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v8i32:
@@ -1012,7 +1012,7 @@ define <4 x i64> @splatconstant_rotate_mask_v4i64(<4 x i64> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v4i64:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vprolq $15, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
@@ -1074,7 +1074,7 @@ define <8 x i32> @splatconstant_rotate_mask_v8i32(<8 x i32> %a) nounwind {
;
; AVX512BW-LABEL: splatconstant_rotate_mask_v8i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vprold $4, %zmm0, %zmm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: retq
diff --git a/test/CodeGen/X86/vector-sext.ll b/test/CodeGen/X86/vector-sext.ll
index 24444ecdc19..d46514a6dc7 100644
--- a/test/CodeGen/X86/vector-sext.ll
+++ b/test/CodeGen/X86/vector-sext.ll
@@ -1245,7 +1245,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1254,7 +1254,7 @@ define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1436,7 +1436,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -1445,7 +1445,7 @@ define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1641,7 +1641,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_4i1_to_4i64:
@@ -1649,7 +1649,7 @@ define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
@@ -1999,7 +1999,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2008,7 +2008,7 @@ define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2402,7 +2402,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX512F-NEXT: movzbl (%rdi), %eax
; AVX512F-NEXT: kmovw %eax, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: load_sext_8i1_to_8i32:
@@ -2410,7 +2410,7 @@ define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i1_to_8i32:
@@ -2920,7 +2920,7 @@ define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3441,7 +3441,7 @@ define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovw (%rdi), %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i1_to_16i16:
@@ -4296,7 +4296,7 @@ define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: kmovd (%rdi), %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
@@ -5029,7 +5029,7 @@ define <32 x i8> @sext_32xi1_to_32xi8(<32 x i16> %c1, <32 x i16> %c2)nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; X32-SSE41-LABEL: sext_32xi1_to_32xi8:
diff --git a/test/CodeGen/X86/vector-shift-ashr-128.ll b/test/CodeGen/X86/vector-shift-ashr-128.ll
index a37b8602459..ea33f22cc07 100644
--- a/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -82,10 +82,10 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512-LABEL: var_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -336,16 +336,16 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -651,9 +651,9 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
;
; AVX512-LABEL: splatvar_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -1087,10 +1087,10 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [1,7]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -1265,16 +1265,16 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512DQ-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512DQ-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1564,9 +1564,9 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
;
; AVX512-LABEL: splatconstant_shift_v2i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-shift-ashr-256.ll b/test/CodeGen/X86/vector-shift-ashr-256.ll
index cb2c816758c..a99c70ebd7d 100644
--- a/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -75,10 +75,10 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX512-LABEL: var_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: var_shift_v4i64:
@@ -309,10 +309,10 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
@@ -696,9 +696,9 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
;
; AVX512-LABEL: splatvar_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpsraq %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatvar_shift_v4i64:
@@ -1170,10 +1170,10 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
;
; AVX512-LABEL: constant_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [1,7,31,62]
; AVX512-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: constant_shift_v4i64:
@@ -1360,10 +1360,10 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsravw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
@@ -1702,9 +1702,9 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
;
; AVX512-LABEL: splatconstant_shift_v4i64:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpsraq $7, %zmm0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_shift_v4i64:
diff --git a/test/CodeGen/X86/vector-shift-lshr-128.ll b/test/CodeGen/X86/vector-shift-lshr-128.ll
index 9dc332799eb..307cf287219 100644
--- a/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -290,7 +290,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -307,16 +307,16 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1010,7 +1010,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1026,16 +1026,16 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-shift-lshr-256.ll b/test/CodeGen/X86/vector-shift-lshr-256.ll
index 7429ae003ea..0192c8ac05d 100644
--- a/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -272,10 +272,10 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
@@ -1091,10 +1091,10 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
diff --git a/test/CodeGen/X86/vector-shift-shl-128.ll b/test/CodeGen/X86/vector-shift-shl-128.ll
index ef4c8855182..b518ad5fcff 100644
--- a/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -247,7 +247,7 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -262,16 +262,16 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512DQ-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm1<def> %xmm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -902,10 +902,10 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
;
; AVX512BW-LABEL: constant_shift_v8i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-shift-shl-256.ll b/test/CodeGen/X86/vector-shift-shl-256.ll
index 712d9dbeef6..04713881271 100644
--- a/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -232,10 +232,10 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
;
; AVX512BW-LABEL: var_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: var_shift_v16i16:
@@ -966,10 +966,10 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
;
; AVX512BW-LABEL: constant_shift_v16i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512BW-NEXT: retq
;
; AVX512DQVL-LABEL: constant_shift_v16i16:
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 3c8377d364d..41dcb5032ee 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -1295,21 +1295,21 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; AVX1-LABEL: insert_reg_and_zero_v4f64:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX1-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_and_zero_v4f64:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f64:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512VL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
index 12d3fb33be8..fc189189eed 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -689,7 +689,7 @@ define <16 x i32> @mask_shuffle_v16i32_00_01_02_03_16_17_18_19_08_09_10_11_12_13
define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x i32> %a) {
; ALL-LABEL: mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; ALL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; ALL-NEXT: retq
@@ -700,7 +700,7 @@ define <16 x i32> @mask_shuffle_v4i32_v16i32_00_01_02_03_00_01_02_03_00_01_02_03
define <16 x float> @mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03(<4 x float> %a) {
; ALL-LABEL: mask_shuffle_v4f32_v16f32_00_01_02_03_00_01_02_03_00_01_02_03_00_01_02_03:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; ALL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; ALL-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; ALL-NEXT: retq
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index fd33c0aeb59..5df15fbe078 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2644,14 +2644,14 @@ define <8 x double> @shuffle_v4f64_v8f64_22222222(<4 x double> %a) {
define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
; AVX512F-LABEL: shuffle_v2i64_v8i64_01010101:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2i64_v8i64_01010101:
; AVX512F-32: # %bb.0:
-; AVX512F-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
@@ -2662,14 +2662,14 @@ define <8 x i64> @shuffle_v2i64_v8i64_01010101(<2 x i64> %a) {
define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
; AVX512F-LABEL: shuffle_v2f64_v8f64_01010101:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512F-32-LABEL: shuffle_v2f64_v8f64_01010101:
; AVX512F-32: # %bb.0:
-; AVX512F-32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; AVX512F-32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; AVX512F-32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX512F-32-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512F-32-NEXT: retl
diff --git a/test/CodeGen/X86/vector-shuffle-avx512.ll b/test/CodeGen/X86/vector-shuffle-avx512.ll
index be4a8b7b669..b066f123861 100644
--- a/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -8,7 +8,7 @@
define <8 x float> @expand(<4 x float> %a) {
; SKX64-LABEL: expand:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX64-NEXT: movb $5, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -23,7 +23,7 @@ define <8 x float> @expand(<4 x float> %a) {
;
; SKX32-LABEL: expand:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX32-NEXT: movb $5, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -42,7 +42,7 @@ define <8 x float> @expand(<4 x float> %a) {
define <8 x float> @expand1(<4 x float> %a ) {
; SKX64-LABEL: expand1:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX64-NEXT: movb $-86, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -50,7 +50,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
;
; KNL64-LABEL: expand1:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL64-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL64-NEXT: vpermps %ymm0, %ymm1, %ymm0
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -59,7 +59,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
;
; SKX32-LABEL: expand1:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX32-NEXT: movb $-86, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -67,7 +67,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
;
; KNL32-LABEL: expand1:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL32-NEXT: vmovaps {{.*#+}} ymm1 = <u,0,u,1,u,2,u,3>
; KNL32-NEXT: vpermps %ymm0, %ymm1, %ymm0
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
@@ -81,7 +81,7 @@ define <8 x float> @expand1(<4 x float> %a ) {
define <4 x double> @expand2(<2 x double> %a) {
; SKX64-LABEL: expand2:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
@@ -89,7 +89,7 @@ define <4 x double> @expand2(<2 x double> %a) {
;
; KNL64-LABEL: expand2:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
@@ -97,7 +97,7 @@ define <4 x double> @expand2(<2 x double> %a) {
;
; SKX32-LABEL: expand2:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %ymm0, %ymm0 {%k1} {z}
@@ -105,7 +105,7 @@ define <4 x double> @expand2(<2 x double> %a) {
;
; KNL32-LABEL: expand2:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
@@ -118,7 +118,7 @@ define <4 x double> @expand2(<2 x double> %a) {
define <8 x i32> @expand3(<4 x i32> %a ) {
; SKX64-LABEL: expand3:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
@@ -133,7 +133,7 @@ define <8 x i32> @expand3(<4 x i32> %a ) {
;
; SKX32-LABEL: expand3:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %ymm0, %ymm0 {%k1} {z}
@@ -153,7 +153,7 @@ define <8 x i32> @expand3(<4 x i32> %a ) {
define <4 x i64> @expand4(<2 x i64> %a ) {
; SKX64-LABEL: expand4:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX64-NEXT: movb $9, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
@@ -161,7 +161,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
;
; KNL64-LABEL: expand4:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -169,7 +169,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
;
; SKX32-LABEL: expand4:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX32-NEXT: movb $9, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %ymm0, %ymm0 {%k1} {z}
@@ -177,7 +177,7 @@ define <4 x i64> @expand4(<2 x i64> %a ) {
;
; KNL32-LABEL: expand4:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; KNL32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
@@ -251,7 +251,7 @@ define <8 x float> @expand6(<4 x float> %a ) {
define <16 x float> @expand7(<8 x float> %a) {
; SKX64-LABEL: expand7:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX64-NEXT: movw $1285, %ax # imm = 0x505
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -259,7 +259,7 @@ define <16 x float> @expand7(<8 x float> %a) {
;
; KNL64-LABEL: expand7:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL64-NEXT: movw $1285, %ax # imm = 0x505
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -267,7 +267,7 @@ define <16 x float> @expand7(<8 x float> %a) {
;
; SKX32-LABEL: expand7:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX32-NEXT: movw $1285, %ax # imm = 0x505
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -275,7 +275,7 @@ define <16 x float> @expand7(<8 x float> %a) {
;
; KNL32-LABEL: expand7:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL32-NEXT: movw $1285, %ax # imm = 0x505
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -287,7 +287,7 @@ define <16 x float> @expand7(<8 x float> %a) {
define <16 x float> @expand8(<8 x float> %a ) {
; SKX64-LABEL: expand8:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -295,7 +295,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
;
; KNL64-LABEL: expand8:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -303,7 +303,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
;
; SKX32-LABEL: expand8:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -311,7 +311,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
;
; KNL32-LABEL: expand8:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandps %zmm0, %zmm0 {%k1} {z}
@@ -324,7 +324,7 @@ define <16 x float> @expand8(<8 x float> %a ) {
define <8 x double> @expand9(<4 x double> %a) {
; SKX64-LABEL: expand9:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -332,7 +332,7 @@ define <8 x double> @expand9(<4 x double> %a) {
;
; KNL64-LABEL: expand9:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -340,7 +340,7 @@ define <8 x double> @expand9(<4 x double> %a) {
;
; SKX32-LABEL: expand9:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -348,7 +348,7 @@ define <8 x double> @expand9(<4 x double> %a) {
;
; KNL32-LABEL: expand9:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vexpandpd %zmm0, %zmm0 {%k1} {z}
@@ -360,7 +360,7 @@ define <8 x double> @expand9(<4 x double> %a) {
define <16 x i32> @expand10(<8 x i32> %a ) {
; SKX64-LABEL: expand10:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -368,7 +368,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
;
; KNL64-LABEL: expand10:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL64-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -376,7 +376,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
;
; SKX32-LABEL: expand10:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -384,7 +384,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
;
; KNL32-LABEL: expand10:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL32-NEXT: movw $-21846, %ax # imm = 0xAAAA
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandd %zmm0, %zmm0 {%k1} {z}
@@ -396,7 +396,7 @@ define <16 x i32> @expand10(<8 x i32> %a ) {
define <8 x i64> @expand11(<4 x i64> %a) {
; SKX64-LABEL: expand11:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX64-NEXT: movb $-127, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -404,7 +404,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
;
; KNL64-LABEL: expand11:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL64-NEXT: movb $-127, %al
; KNL64-NEXT: kmovw %eax, %k1
; KNL64-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -412,7 +412,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
;
; SKX32-LABEL: expand11:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX32-NEXT: movb $-127, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -420,7 +420,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
;
; KNL32-LABEL: expand11:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL32-NEXT: movb $-127, %al
; KNL32-NEXT: kmovw %eax, %k1
; KNL32-NEXT: vpexpandq %zmm0, %zmm0 {%k1} {z}
@@ -433,7 +433,7 @@ define <8 x i64> @expand11(<4 x i64> %a) {
define <16 x float> @expand12(<8 x float> %a) {
; SKX64-LABEL: expand12:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -442,7 +442,7 @@ define <16 x float> @expand12(<8 x float> %a) {
;
; KNL64-LABEL: expand12:
; KNL64: # %bb.0:
-; KNL64-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL64-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL64-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL64-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -451,7 +451,7 @@ define <16 x float> @expand12(<8 x float> %a) {
;
; SKX32-LABEL: expand12:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; SKX32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; SKX32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; SKX32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; SKX32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -460,7 +460,7 @@ define <16 x float> @expand12(<8 x float> %a) {
;
; KNL32-LABEL: expand12:
; KNL32: # %bb.0:
-; KNL32-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; KNL32-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; KNL32-NEXT: vmovaps {{.*#+}} zmm2 = [0,16,2,16,4,16,6,16,0,16,1,16,2,16,3,16]
; KNL32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL32-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
@@ -503,7 +503,7 @@ define <16 x float> @expand13(<8 x float> %a ) {
define <8 x float> @expand14(<4 x float> %a) {
; SKX64-LABEL: expand14:
; SKX64: # %bb.0:
-; SKX64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX64-NEXT: movb $20, %al
; SKX64-NEXT: kmovd %eax, %k1
; SKX64-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
@@ -520,7 +520,7 @@ define <8 x float> @expand14(<4 x float> %a) {
;
; SKX32-LABEL: expand14:
; SKX32: # %bb.0:
-; SKX32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; SKX32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; SKX32-NEXT: movb $20, %al
; SKX32-NEXT: kmovd %eax, %k1
; SKX32-NEXT: vexpandps %ymm0, %ymm0 {%k1} {z}
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index b0b76d7dc13..8c17978d237 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -196,13 +196,13 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
define <32 x i8> @combine_pshufb_as_vpbroadcastb256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastb256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vpbroadcastb %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastb256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vpbroadcastb %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -231,13 +231,13 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastw128(<16 x i8> %a) {
define <32 x i8> @combine_pshufb_as_vpbroadcastw256(<2 x i64> %a) {
; X32-LABEL: combine_pshufb_as_vpbroadcastw256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vpbroadcastw %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastw256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vpbroadcastw %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x i64> %a, <2 x i64> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
@@ -269,14 +269,14 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastd128(<16 x i8> %a) {
define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vpbroadcastd %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vpbroadcastd %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -303,14 +303,14 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastq128(<16 x i8> %a) {
define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
; X32-LABEL: combine_permd_as_vpbroadcastq256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vpbroadcastq %xmm0, %ymm0
; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastq256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
@@ -339,13 +339,13 @@ define <4 x float> @combine_pshufb_as_vpbroadcastss128(<4 x float> %a) {
define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastss256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vbroadcastss %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastss256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vbroadcastss %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <4 x float> %a, <4 x float> undef, <8 x i32> <i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -356,13 +356,13 @@ define <8 x float> @combine_permps_as_vpbroadcastss256(<4 x float> %a) {
define <4 x double> @combine_permps_as_vpbroadcastsd256(<2 x double> %a) {
; X32-LABEL: combine_permps_as_vpbroadcastsd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X32-NEXT: vbroadcastsd %xmm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: combine_permps_as_vpbroadcastsd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
; X64-NEXT: vbroadcastsd %xmm0, %ymm0
; X64-NEXT: retq
%1 = shufflevector <2 x double> %a, <2 x double> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
diff --git a/test/CodeGen/X86/vector-shuffle-v1.ll b/test/CodeGen/X86/vector-shuffle-v1.ll
index b3219a33058..9c92ca756eb 100644
--- a/test/CodeGen/X86/vector-shuffle-v1.ll
+++ b/test/CodeGen/X86/vector-shuffle-v1.ll
@@ -116,7 +116,7 @@ define <8 x i1> @shuf8i1_3_6_1_0_3_7_7_0(<8 x i64> %a, <8 x i64> %b, <8 x i64> %
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -243,7 +243,7 @@ define <8 x i1> @shuf8i1_u_2_u_u_2_u_2_u(i8 %a) {
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -287,7 +287,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -301,7 +301,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -314,7 +314,7 @@ define i8 @shuf8i1_10_2_9_u_3_u_2_u(i8 %a) {
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -332,7 +332,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -344,7 +344,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; AVX512VL-NEXT: vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -355,7 +355,7 @@ define i8 @shuf8i1_0_1_4_5_u_u_u_u(i8 %a) {
; VL_BW_DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,4,5,0,1,0,1]
; VL_BW_DQ-NEXT: vpmovq2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -375,7 +375,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -389,7 +389,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -402,7 +402,7 @@ define i8 @shuf8i1_9_6_1_0_3_7_7_0(i8 %a) {
; VL_BW_DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -422,7 +422,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -436,7 +436,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -449,7 +449,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0(i8 %a) {
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -471,7 +471,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -487,7 +487,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -500,7 +500,7 @@ define i8 @shuf8i1__9_6_1_10_3_7_7_1(i8 %a) {
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -522,7 +522,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512F-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512F-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %al killed %al killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -538,7 +538,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; AVX512VL-NEXT: vpsllq $63, %zmm2, %zmm0
; AVX512VL-NEXT: vptestmq %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %al killed %al killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -552,7 +552,7 @@ define i8 @shuf8i1_9_6_1_10_3_7_7_0_all_ones(<8 x i1> %a) {
; VL_BW_DQ-NEXT: vpermt2q %zmm0, %zmm1, %zmm2
; VL_BW_DQ-NEXT: vpmovq2m %zmm2, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %al<def> %al<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %al killed %al killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%c = shufflevector <8 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <8 x i1> %a, <8 x i32> <i32 9, i32 6, i32 1, i32 0, i32 3, i32 7, i32 7, i32 0>
@@ -570,7 +570,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512F-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
-; AVX512F-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512F-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -582,7 +582,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; AVX512VL-NEXT: vpslld $31, %zmm0, %zmm0
; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovw %k0, %eax
-; AVX512VL-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; AVX512VL-NEXT: # kill: def %ax killed %ax killed %eax
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
@@ -593,7 +593,7 @@ define i16 @shuf16i1_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0_0(i16 %a) {
; VL_BW_DQ-NEXT: vpbroadcastd %xmm0, %zmm0
; VL_BW_DQ-NEXT: vpmovd2m %zmm0, %k0
; VL_BW_DQ-NEXT: kmovd %k0, %eax
-; VL_BW_DQ-NEXT: # kill: %ax<def> %ax<kill> %eax<kill>
+; VL_BW_DQ-NEXT: # kill: def %ax killed %ax killed %eax
; VL_BW_DQ-NEXT: vzeroupper
; VL_BW_DQ-NEXT: retq
%b = bitcast i16 %a to <16 x i1>
diff --git a/test/CodeGen/X86/vector-shuffle-variable-128.ll b/test/CodeGen/X86/vector-shuffle-variable-128.ll
index 6a0474803c6..0367737dda6 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-128.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-128.ll
@@ -37,8 +37,8 @@ define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i6
define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE-NEXT: # kill: def %edi killed %edi def %rdi
; SSE-NEXT: andl $1, %edi
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $1, %esi
@@ -49,8 +49,8 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $1, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $1, %esi
@@ -68,10 +68,10 @@ define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1)
define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
@@ -88,10 +88,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
+; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
+; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
@@ -108,10 +108,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
@@ -125,10 +125,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
@@ -153,10 +153,10 @@ define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i3
define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-NEXT: andl $3, %edi
; SSE2-NEXT: andl $3, %esi
; SSE2-NEXT: andl $3, %edx
@@ -173,10 +173,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
+; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
+; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSSE3-NEXT: andl $3, %edi
; SSSE3-NEXT: andl $3, %esi
; SSSE3-NEXT: andl $3, %edx
@@ -193,10 +193,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
; SSE41-NEXT: andl $3, %edi
; SSE41-NEXT: andl $3, %esi
; SSE41-NEXT: andl $3, %edx
@@ -210,10 +210,10 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: andl $3, %esi
; AVX-NEXT: andl $3, %edx
@@ -238,12 +238,12 @@ define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i
define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
@@ -282,12 +282,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
+; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
+; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
@@ -326,12 +326,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
@@ -356,12 +356,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
@@ -405,12 +405,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i1
define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSE2-NEXT: andl $15, %eax
@@ -490,12 +490,12 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
+; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
+; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl {{[0-9]+}}(%rsp), %eax
; SSSE3-NEXT: andl $15, %eax
@@ -575,12 +575,12 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
; SSE41-NEXT: andl $15, %edi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
@@ -630,12 +630,12 @@ define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $15, %edi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
@@ -1168,9 +1168,9 @@ define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8*
define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # %bb.0:
-; SSE-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE-NEXT: # kill: def %edi killed %edi def %rdi
; SSE-NEXT: andl $3, %edi
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: andl $3, %edx
@@ -1185,9 +1185,9 @@ define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float>
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $3, %edi
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $3, %edx
@@ -1213,12 +1213,12 @@ define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float>
define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSE2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSE2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE2-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSE2-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSE2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE2-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE2-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE2-NEXT: # kill: def %edi killed %edi def %rdi
; SSE2-NEXT: andl $7, %edi
; SSE2-NEXT: andl $7, %esi
; SSE2-NEXT: andl $7, %edx
@@ -1250,12 +1250,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSSE3-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSSE3-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSSE3-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSSE3-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSSE3-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSSE3-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSSE3-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSSE3-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSSE3-NEXT: # kill: def %edx killed %edx def %rdx
+; SSSE3-NEXT: # kill: def %esi killed %esi def %rsi
+; SSSE3-NEXT: # kill: def %edi killed %edi def %rdi
; SSSE3-NEXT: andl $7, %edi
; SSSE3-NEXT: andl $7, %esi
; SSSE3-NEXT: andl $7, %edx
@@ -1287,12 +1287,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # %bb.0:
-; SSE41-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; SSE41-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; SSE41-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; SSE41-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; SSE41-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; SSE41-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; SSE41-NEXT: # kill: def %r9d killed %r9d def %r9
+; SSE41-NEXT: # kill: def %r8d killed %r8d def %r8
+; SSE41-NEXT: # kill: def %ecx killed %ecx def %rcx
+; SSE41-NEXT: # kill: def %edx killed %edx def %rdx
+; SSE41-NEXT: # kill: def %esi killed %esi def %rsi
+; SSE41-NEXT: # kill: def %edi killed %edi def %rdi
; SSE41-NEXT: andl $7, %edi
; SSE41-NEXT: andl $7, %esi
; SSE41-NEXT: andl $7, %edx
@@ -1312,12 +1312,12 @@ define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX: # %bb.0:
-; AVX-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX-NEXT: # kill: def %edi killed %edi def %rdi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edx
diff --git a/test/CodeGen/X86/vector-shuffle-variable-256.ll b/test/CodeGen/X86/vector-shuffle-variable-256.ll
index 7926fb93335..91672d07b05 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -185,12 +185,12 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
; ALL-NEXT: movq %rsp, %rbp
; ALL-NEXT: andq $-32, %rsp
; ALL-NEXT: subq $64, %rsp
-; ALL-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; ALL-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; ALL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; ALL-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; ALL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ALL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ALL-NEXT: # kill: def %r9d killed %r9d def %r9
+; ALL-NEXT: # kill: def %r8d killed %r8d def %r8
+; ALL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; ALL-NEXT: # kill: def %edx killed %edx def %rdx
+; ALL-NEXT: # kill: def %esi killed %esi def %rsi
+; ALL-NEXT: # kill: def %edi killed %edi def %rdi
; ALL-NEXT: andl $7, %edi
; ALL-NEXT: andl $7, %esi
; ALL-NEXT: andl $7, %edx
@@ -236,12 +236,12 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; ALL-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; ALL-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; ALL-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; ALL-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; ALL-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; ALL-NEXT: # kill: def %r9d killed %r9d def %r9
+; ALL-NEXT: # kill: def %r8d killed %r8d def %r8
+; ALL-NEXT: # kill: def %ecx killed %ecx def %rcx
+; ALL-NEXT: # kill: def %edx killed %edx def %rdx
+; ALL-NEXT: # kill: def %esi killed %esi def %rsi
+; ALL-NEXT: # kill: def %edi killed %edi def %rdi
; ALL-NEXT: andl $3, %edi
; ALL-NEXT: andl $3, %esi
; ALL-NEXT: andl $3, %edx
@@ -289,12 +289,12 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX1-NEXT: movq %rsp, %rbp
; AVX1-NEXT: andq $-32, %rsp
; AVX1-NEXT: subq $64, %rsp
-; AVX1-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX1-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX1-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX1-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX1-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX1-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX1-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: andl $15, %edi
; AVX1-NEXT: vmovaps %ymm0, (%rsp)
; AVX1-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -351,12 +351,12 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
; AVX2-NEXT: movq %rsp, %rbp
; AVX2-NEXT: andq $-32, %rsp
; AVX2-NEXT: subq $64, %rsp
-; AVX2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX2-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX2-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX2-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: andl $15, %edi
; AVX2-NEXT: vmovaps %ymm0, (%rsp)
; AVX2-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -444,12 +444,12 @@ define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x,
define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX1-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX1-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX1-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX1-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX1-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX1-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX1-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX1-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX1-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX1-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX1-NEXT: # kill: def %edi killed %edi def %rdi
; AVX1-NEXT: andl $7, %edi
; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX1-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -500,12 +500,12 @@ define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i
;
; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: # kill: %r9d<def> %r9d<kill> %r9<def>
-; AVX2-NEXT: # kill: %r8d<def> %r8d<kill> %r8<def>
-; AVX2-NEXT: # kill: %ecx<def> %ecx<kill> %rcx<def>
-; AVX2-NEXT: # kill: %edx<def> %edx<kill> %rdx<def>
-; AVX2-NEXT: # kill: %esi<def> %esi<kill> %rsi<def>
-; AVX2-NEXT: # kill: %edi<def> %edi<kill> %rdi<def>
+; AVX2-NEXT: # kill: def %r9d killed %r9d def %r9
+; AVX2-NEXT: # kill: def %r8d killed %r8d def %r8
+; AVX2-NEXT: # kill: def %ecx killed %ecx def %rcx
+; AVX2-NEXT: # kill: def %edx killed %edx def %rdx
+; AVX2-NEXT: # kill: def %esi killed %esi def %rsi
+; AVX2-NEXT: # kill: def %edi killed %edi def %rdi
; AVX2-NEXT: andl $7, %edi
; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX2-NEXT: movzwl -24(%rsp,%rdi,2), %eax
diff --git a/test/CodeGen/X86/vector-trunc-math.ll b/test/CodeGen/X86/vector-trunc-math.ll
index 45479941143..7d26cec9928 100644
--- a/test/CodeGen/X86/vector-trunc-math.ll
+++ b/test/CodeGen/X86/vector-trunc-math.ll
@@ -33,7 +33,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -41,7 +41,7 @@ define <4 x i32> @trunc_add_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <4 x i64> %a0, %a1
@@ -101,7 +101,7 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -146,7 +146,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -154,7 +154,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = add <8 x i32> %a0, %a1
@@ -383,7 +383,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -435,7 +435,7 @@ define <8 x i16> @trunc_add_v8i32_v8i16_sext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
;
; AVX512-LABEL: trunc_add_v8i32_v8i16_sext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0
@@ -477,7 +477,7 @@ define <4 x i32> @trunc_add_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_add_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -576,7 +576,7 @@ define <8 x i16> @trunc_add_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_add_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -771,7 +771,7 @@ define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_add_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -816,7 +816,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -824,7 +824,7 @@ define <4 x i32> @trunc_sub_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, %a1
@@ -884,7 +884,7 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -929,7 +929,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -937,7 +937,7 @@ define <8 x i16> @trunc_sub_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, %a1
@@ -1166,7 +1166,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1214,7 +1214,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX2-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1222,7 +1222,7 @@ define <4 x i32> @trunc_sub_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubq {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <4 x i64> %a0, <i64 0, i64 1, i64 2, i64 3>
@@ -1287,7 +1287,7 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1331,7 +1331,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX2-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1339,7 +1339,7 @@ define <8 x i16> @trunc_sub_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpsubd {{.*}}(%rip), %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = sub <8 x i32> %a0, <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1567,7 +1567,7 @@ define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1635,8 +1635,8 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
;
; AVX512F-LABEL: trunc_mul_v4i64_v4i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1645,8 +1645,8 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
;
; AVX512BW-LABEL: trunc_mul_v4i64_v4i32:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmulld %xmm1, %xmm0, %xmm0
@@ -1655,11 +1655,11 @@ define <4 x i32> @trunc_mul_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
;
; AVX512DQ-LABEL: trunc_mul_v4i64_v4i32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512DQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512DQ-NEXT: vpmullq %zmm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%1 = mul <4 x i64> %a0, %a1
@@ -1810,7 +1810,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -1818,7 +1818,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = mul <8 x i32> %a0, %a1
@@ -2241,7 +2241,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2293,7 +2293,7 @@ define <8 x i16> @trunc_mul_v8i32_v8i16_zext_8i8(<16 x i8> %a0, <8 x i32> %a1) {
;
; AVX512-LABEL: trunc_mul_v8i32_v8i16_zext_8i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
+; AVX512-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512-NEXT: vpmovdw %zmm1, %ymm1
; AVX512-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT: vpmullw %xmm1, %xmm0, %xmm0
@@ -2350,7 +2350,7 @@ define <4 x i32> @trunc_mul_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_mul_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2449,7 +2449,7 @@ define <8 x i16> @trunc_mul_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_mul_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -2793,7 +2793,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2834,7 +2834,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2842,7 +2842,7 @@ define <4 x i32> @trunc_and_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <4 x i64> %a0, %a1
@@ -2898,7 +2898,7 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2941,7 +2941,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -2949,7 +2949,7 @@ define <8 x i16> @trunc_and_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = and <8 x i32> %a0, %a1
@@ -3164,7 +3164,7 @@ define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3209,7 +3209,7 @@ define <4 x i32> @trunc_and_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_and_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3308,7 +3308,7 @@ define <8 x i16> @trunc_and_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_and_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -3503,7 +3503,7 @@ define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_and_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -3546,7 +3546,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3554,7 +3554,7 @@ define <4 x i32> @trunc_xor_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <4 x i64> %a0, %a1
@@ -3610,7 +3610,7 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3653,7 +3653,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -3661,7 +3661,7 @@ define <8 x i16> @trunc_xor_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = xor <8 x i32> %a0, %a1
@@ -3876,7 +3876,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwin
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -3921,7 +3921,7 @@ define <4 x i32> @trunc_xor_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_xor_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4020,7 +4020,7 @@ define <8 x i16> @trunc_xor_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_xor_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4215,7 +4215,7 @@ define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_xor_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
@@ -4258,7 +4258,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX2-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4266,7 +4266,7 @@ define <4 x i32> @trunc_or_v4i64_v4i32(<4 x i64> %a0, <4 x i64> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <4 x i64> %a0, %a1
@@ -4322,7 +4322,7 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4365,7 +4365,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -4373,7 +4373,7 @@ define <8 x i16> @trunc_or_v8i32_v8i16(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = or <8 x i32> %a0, %a1
@@ -4588,7 +4588,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -4633,7 +4633,7 @@ define <4 x i32> @trunc_or_const_v4i64_v4i32(<4 x i64> %a0) nounwind {
;
; AVX512-LABEL: trunc_or_const_v4i64_v4i32:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4732,7 +4732,7 @@ define <8 x i16> @trunc_or_const_v8i32_v8i16(<8 x i32> %a0) nounwind {
;
; AVX512-LABEL: trunc_or_const_v8i32_v8i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512-NEXT: vpmovdw %zmm0, %ymm0
; AVX512-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
@@ -4927,7 +4927,7 @@ define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
;
; AVX512BW-LABEL: trunc_or_const_v16i16_v16i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
diff --git a/test/CodeGen/X86/vector-trunc.ll b/test/CodeGen/X86/vector-trunc.ll
index 62428a558de..d25117ca715 100644
--- a/test/CodeGen/X86/vector-trunc.ll
+++ b/test/CodeGen/X86/vector-trunc.ll
@@ -237,7 +237,7 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -348,15 +348,15 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; AVX2-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX2-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -368,9 +368,9 @@ define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
;
; AVX512BW-LABEL: trunc8i32_8i16:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -413,7 +413,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -428,7 +428,7 @@ define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -492,7 +492,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512F-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512F-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -507,7 +507,7 @@ define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512BW-NEXT: # kill: %xmm0<def> %xmm0<kill> %ymm0<kill>
+; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -574,7 +574,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
;
; AVX512F-LABEL: trunc8i32_8i8:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rax)
@@ -589,7 +589,7 @@ define void @trunc8i32_8i8(<8 x i32> %a) {
;
; AVX512BW-LABEL: trunc8i32_8i8:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rax)
@@ -1089,7 +1089,7 @@ define void @trunc16i16_16i8(<16 x i16> %a) {
;
; AVX512BW-LABEL: trunc16i16_16i8:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
@@ -1379,8 +1379,8 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512F-LABEL: trunc2x4i64_8i32:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1395,8 +1395,8 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512BW-LABEL: trunc2x4i64_8i32:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
@@ -1489,8 +1489,8 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512F-LABEL: trunc2x4i64_8i16:
; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
@@ -1516,8 +1516,8 @@ define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
;
; AVX512BW-LABEL: trunc2x4i64_8i16:
; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512BW-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512BW-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index 15ff8f78d32..cca8496cab3 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -133,7 +133,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -354,7 +354,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -625,7 +625,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -886,7 +886,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: vzeroupper
; AVX512VPOPCNTDQ-NEXT: retq
;
@@ -1104,7 +1104,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -1286,7 +1286,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -1449,7 +1449,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
@@ -1608,7 +1608,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; BITALG_NOVLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; BITALG_NOVLX-NEXT: vzeroupper
; BITALG_NOVLX-NEXT: retq
;
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index cc2bcd8710d..9d5402e7208 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -103,7 +103,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64:
@@ -239,7 +239,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv4i64u:
@@ -411,7 +411,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32:
@@ -572,7 +572,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512VPOPCNTDQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512VPOPCNTDQ-NEXT: retq
;
; BITALG_NOVLX-LABEL: testv8i32u:
@@ -759,7 +759,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16:
@@ -910,7 +910,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntw %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv16i16u:
@@ -1051,7 +1051,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8:
@@ -1189,7 +1189,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; BITALG_NOVLX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1
; BITALG_NOVLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; BITALG_NOVLX-NEXT: vpopcntb %zmm0, %zmm0
-; BITALG_NOVLX-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; BITALG_NOVLX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; BITALG_NOVLX-NEXT: retq
;
; BITALG-LABEL: testv32i8u:
diff --git a/test/CodeGen/X86/verifier-phi-fail0.mir b/test/CodeGen/X86/verifier-phi-fail0.mir
index 482c2c85d15..c17b0daa75b 100644
--- a/test/CodeGen/X86/verifier-phi-fail0.mir
+++ b/test/CodeGen/X86/verifier-phi-fail0.mir
@@ -3,13 +3,13 @@
# CHECK: Bad machine code: PHI operand is not live-out from predecessor
# CHECK: - function: func0
# CHECK: - basic block: %bb.3
-# CHECK: - instruction: %0<def> = PHI
+# CHECK: - instruction: %0:gr32 = PHI
# CHECK: - operand 1: %1
#
# CHECK: Bad machine code: PHI operand is not live-out from predecessor
# CHECK: - function: func0
# CHECK: - basic block: %bb.3
-# CHECK: - instruction: %0<def> = PHI
+# CHECK: - instruction: %0:gr32 = PHI
# CHECK: - operand 3: %0
name: func0
tracksRegLiveness: true
diff --git a/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll b/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
index 7f37bcc9778..004dcfb69a1 100644
--- a/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
+++ b/test/CodeGen/X86/vpshufbitqbm-intrinsics.ll
@@ -8,7 +8,7 @@ define i16 @test_vpshufbitqmb_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshufbitqmb %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: %ax<def> %ax<kill> %eax<kill>
+; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.vpshufbitqmb.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
diff --git a/test/CodeGen/X86/vselect-pcmp.ll b/test/CodeGen/X86/vselect-pcmp.ll
index c13aa717237..e36d74194aa 100644
--- a/test/CodeGen/X86/vselect-pcmp.ll
+++ b/test/CodeGen/X86/vselect-pcmp.ll
@@ -182,13 +182,13 @@ define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask)
;
; AVX512F-LABEL: signbit_sel_v8i32:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: %ymm2<def> %ymm2<kill> %zmm2<def>
-; AVX512F-NEXT: # kill: %ymm1<def> %ymm1<kill> %zmm1<def>
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<def>
+; AVX512F-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; AVX512F-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtd %zmm2, %zmm3, %k1
; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512F-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: signbit_sel_v8i32:
diff --git a/test/CodeGen/X86/widen_bitops-0.ll b/test/CodeGen/X86/widen_bitops-0.ll
index f8843fd8ce2..f939396452e 100644
--- a/test/CodeGen/X86/widen_bitops-0.ll
+++ b/test/CodeGen/X86/widen_bitops-0.ll
@@ -141,9 +141,9 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; X32-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; X32-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X32-SSE-NEXT: # kill: def %al killed %al killed %eax
+; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx
+; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: and_v3i8_as_i24:
@@ -158,9 +158,9 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; X64-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; X64-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X64-SSE-NEXT: # kill: def %al killed %al killed %eax
+; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx
+; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
@@ -182,9 +182,9 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; X32-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; X32-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X32-SSE-NEXT: # kill: def %al killed %al killed %eax
+; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx
+; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: xor_v3i8_as_i24:
@@ -199,9 +199,9 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; X64-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; X64-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X64-SSE-NEXT: # kill: def %al killed %al killed %eax
+; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx
+; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
@@ -223,9 +223,9 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X32-SSE-NEXT: pextrb $0, %xmm1, %eax
; X32-SSE-NEXT: pextrb $4, %xmm1, %edx
; X32-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X32-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; X32-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; X32-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X32-SSE-NEXT: # kill: def %al killed %al killed %eax
+; X32-SSE-NEXT: # kill: def %dl killed %dl killed %edx
+; X32-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: or_v3i8_as_i24:
@@ -240,9 +240,9 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
; X64-SSE-NEXT: pextrb $0, %xmm1, %eax
; X64-SSE-NEXT: pextrb $4, %xmm1, %edx
; X64-SSE-NEXT: pextrb $8, %xmm1, %ecx
-; X64-SSE-NEXT: # kill: %al<def> %al<kill> %eax<kill>
-; X64-SSE-NEXT: # kill: %dl<def> %dl<kill> %edx<kill>
-; X64-SSE-NEXT: # kill: %cl<def> %cl<kill> %ecx<kill>
+; X64-SSE-NEXT: # kill: def %al killed %al killed %eax
+; X64-SSE-NEXT: # kill: def %dl killed %dl killed %edx
+; X64-SSE-NEXT: # kill: def %cl killed %cl killed %ecx
; X64-SSE-NEXT: retq
%1 = bitcast <3 x i8> %a to i24
%2 = bitcast <3 x i8> %b to i24
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 0c0ca56ea3a..792bbbed52e 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -651,7 +651,7 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX512-NEXT: vpmovb2m %zmm0, %k1
; AVX512-NEXT: kxnorw %k1, %k0, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
-; AVX512-NEXT: # kill: %xmm0<def> %xmm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%wide.vec = load <64 x i8>, <64 x i8>* %ptr
@@ -964,7 +964,7 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX512-NEXT: vpmovb2m %zmm0, %k1
; AVX512-NEXT: kxnord %k1, %k0, %k0
; AVX512-NEXT: vpmovm2b %k0, %zmm0
-; AVX512-NEXT: # kill: %ymm0<def> %ymm0<kill> %zmm0<kill>
+; AVX512-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
; AVX512-NEXT: retq
%wide.vec = load <128 x i8>, <128 x i8>* %ptr
%v1 = shufflevector <128 x i8> %wide.vec, <128 x i8> undef, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>
diff --git a/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll b/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
index 98fd3d90cbe..543d4f405ad 100644
--- a/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
+++ b/test/CodeGen/X86/x86-upgrade-avx2-vbroadcast.ll
@@ -8,7 +8,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
define <4 x i64> @broadcast128(<2 x i64> %src) {
; CHECK-LABEL: broadcast128:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: %xmm0<def> %xmm0<kill> %ymm0<def>
+; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: retq
diff --git a/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir b/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
index 7660a031690..c554c04cfb1 100644
--- a/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
+++ b/test/DebugInfo/MIR/X86/live-debug-vars-unused-arg-debugonly.mir
@@ -153,11 +153,11 @@ body: |
# not cover the whole BB.
#
# CHECKDBG-LABEL: ********** EMITTING LIVE DEBUG VARIABLES **********
-# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=%edi
+# CHECKDBG-NEXT: !"argc,5" [0B;0e):0 Loc0=debug-use %edi
# CHECKDBG-NEXT: [0B;0e):0 %bb.0-160B
-# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=%rsi
+# CHECKDBG-NEXT: !"argv,5" [0B;0e):0 Loc0=debug-use %rsi
# CHECKDBG-NEXT: [0B;0e):0 %bb.0-160B
-# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=%2
+# CHECKDBG-NEXT: !"a0,7" [16r;64r):0 Loc0=debug-use %2
# CHECKDBG-NEXT: [16r;64r):0 %bb.0-160B
-# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=%3
+# CHECKDBG-NEXT: !"a1,8" [32r;80r):0 Loc0=debug-use %3
# CHECKDBG-NEXT: [32r;80r):0 %bb.0-160B
diff --git a/test/MC/AArch64/arm64-leaf-compact-unwind.s b/test/MC/AArch64/arm64-leaf-compact-unwind.s
index 2278ab7c248..1619a50f6b0 100644
--- a/test/MC/AArch64/arm64-leaf-compact-unwind.s
+++ b/test/MC/AArch64/arm64-leaf-compact-unwind.s
@@ -200,7 +200,7 @@ Ltmp13:
.cfi_offset w27, -16
Ltmp14:
.cfi_offset w28, -24
- ; kill: W0<def> W0<kill> X0<def>
+ ; kill: def W0 killed W0 def X0
mov x9, xzr
ubfx x10, x0, #0, #32
mov x8, sp
diff --git a/unittests/CodeGen/MachineOperandTest.cpp b/unittests/CodeGen/MachineOperandTest.cpp
index afbf4f4c368..5926b6767ff 100644
--- a/unittests/CodeGen/MachineOperandTest.cpp
+++ b/unittests/CodeGen/MachineOperandTest.cpp
@@ -9,6 +9,7 @@
#include "llvm/ADT/ilist_node.h"
#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/Support/raw_ostream.h"
#include "gtest/gtest.h"
using namespace llvm;
@@ -37,4 +38,42 @@ TEST(MachineOperandTest, ChangeToTargetIndexTest) {
ASSERT_TRUE(MO.getTargetFlags() == 12);
}
+TEST(MachineOperandTest, PrintRegisterMask) {
+ uint32_t Dummy;
+ MachineOperand MO = MachineOperand::CreateRegMask(&Dummy);
+
+ // Checking some preconditions on the newly created
+ // MachineOperand.
+ ASSERT_TRUE(MO.isRegMask());
+ ASSERT_TRUE(MO.getRegMask() == &Dummy);
+
+ // Print a MachineOperand containing a RegMask. Here we check that without a
+ // TRI and IntrinsicInfo we still print a less detailed regmask.
+ std::string str;
+ raw_string_ostream OS(str);
+ MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
+ ASSERT_TRUE(OS.str() == "<regmask ...>");
+}
+
+TEST(MachineOperandTest, PrintSubReg) {
+ // Create a MachineOperand with RegNum=1 and SubReg=5.
+ MachineOperand MO = MachineOperand::CreateReg(
+ /*Reg=*/1, /*isDef=*/false, /*isImp=*/false, /*isKill=*/false,
+ /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/false,
+ /*SubReg=*/5, /*isDebug=*/false, /*isInternalRead=*/false);
+
+ // Checking some preconditions on the newly created
+ // MachineOperand.
+ ASSERT_TRUE(MO.isReg());
+ ASSERT_TRUE(MO.getReg() == 1);
+ ASSERT_TRUE(MO.getSubReg() == 5);
+
+ // Print a MachineOperand containing a SubReg. Here we check that without a
+ // TRI and IntrinsicInfo we can still print the subreg index.
+ std::string str;
+ raw_string_ostream OS(str);
+ MO.print(OS, /*TRI=*/nullptr, /*IntrinsicInfo=*/nullptr);
+ ASSERT_TRUE(OS.str() == "%physreg1.subreg5");
+}
+
} // end namespace