author     Artur Pilipenko <apilipenko@azulsystems.com>    2017-02-06 17:48:08 +0000
committer  Artur Pilipenko <apilipenko@azulsystems.com>    2017-02-06 17:48:08 +0000
commit     e7cc4f3c599a28d02d0e01f4bf5cd3f9f4532067 (patch)
tree       d6f3d4c38be9392e3794ceeb7285ba727e4c2429
parent     d9394e7ae7841a85a654dfb9bda8ebf8b0994d38 (diff)
[DAGCombiner] Support bswap as a part of load combine patterns
Reviewed By: RKSimon
Differential Revision: https://reviews.llvm.org/D29397
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294201 91177308-0d34-0410-b5e6-96231b3b80d8
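
For context, the new tests model the following source-level idiom: two adjacent 16-bit loads are individually byte-swapped and then reassembled into a 32-bit value. Below is a minimal C++ illustration of that idiom; it is not part of the commit, the helper name read_be32 is made up, and __builtin_bswap16 is the GCC/Clang builtin a front end would lower to llvm.bswap.i16. With this change the whole body can be combined into a single 32-bit load followed by one byte swap (or a plain load on a big-endian target).

    #include <cstdint>

    // Hypothetical helper mirroring the IR in the new tests: load two adjacent
    // 16-bit halves of a big-endian 32-bit value and reassemble them in host
    // byte order. The DAG combiner can now fold this into one i32 load + bswap.
    static inline uint32_t read_be32(const uint16_t *p) {
      uint32_t hi = __builtin_bswap16(p[0]); // bswap(p[0]) -> high half
      uint32_t lo = __builtin_bswap16(p[1]); // bswap(p[1]) -> low half
      return (hi << 16) | lo;
    }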
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp         |  3
-rw-r--r--  test/CodeGen/AArch64/load-combine-big-endian.ll  | 22
-rw-r--r--  test/CodeGen/AArch64/load-combine.ll             | 23
-rw-r--r--  test/CodeGen/ARM/load-combine-big-endian.ll      | 34
-rw-r--r--  test/CodeGen/ARM/load-combine.ll                 | 34
-rw-r--r--  test/CodeGen/X86/load-combine.ll                 | 32
6 files changed, 148 insertions, 0 deletions
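
The functional change is the new ISD::BSWAP case in calculateByteProvider, shown in the first hunk below: byte Index of a byte-swapped value is provided by byte ByteWidth - Index - 1 of its operand, so the query is simply forwarded with a mirrored index. A standalone sketch of that index mapping (hypothetical names, not the LLVM API) follows.

    #include <cassert>

    // Byte Index of bswap(X), where X is ByteWidth bytes wide, comes from byte
    // (ByteWidth - Index - 1) of X. This is the mapping the new case applies
    // before recursing into the bswap operand.
    inline unsigned bswapSourceByteIndex(unsigned Index, unsigned ByteWidth) {
      assert(Index < ByteWidth && "byte index out of range");
      return ByteWidth - Index - 1;
    }

    // For example, with ByteWidth == 4, byte 0 of bswap(X) is byte 3 of X and
    // byte 1 is byte 2, so a bswapped load still maps to a contiguous run of
    // memory bytes and remains eligible for the load combine.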
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 02221d700bf..97ec5d6b2d7 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4463,6 +4463,9 @@ const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index,
                ? ByteProvider::getConstantZero()
                : calculateByteProvider(NarrowOp, Index, Depth + 1);
   }
+  case ISD::BSWAP:
+    return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
+                                 Depth + 1);
   case ISD::LOAD: {
     auto L = cast<LoadSDNode>(Op.getNode());
diff --git a/test/CodeGen/AArch64/load-combine-big-endian.ll b/test/CodeGen/AArch64/load-combine-big-endian.ll
index 692a57c4471..9fc852b7174 100644
--- a/test/CodeGen/AArch64/load-combine-big-endian.ll
+++ b/test/CodeGen/AArch64/load-combine-big-endian.ll
@@ -331,3 +331,25 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[0]) | (i32) bswap(p[1] << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr w8, [x0]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+  %tmp = bitcast i32* %arg to i16*
+  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+  %tmp2 = zext i16 %tmp11 to i32
+  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+  %tmp5 = zext i16 %tmp41 to i32
+  %tmp6 = shl nuw nsw i32 %tmp5, 16
+  %tmp7 = or i32 %tmp6, %tmp2
+  ret i32 %tmp7
+}
diff --git a/test/CodeGen/AArch64/load-combine.ll b/test/CodeGen/AArch64/load-combine.ll
index 86461e2a1d7..baa9628d4e3 100644
--- a/test/CodeGen/AArch64/load-combine.ll
+++ b/test/CodeGen/AArch64/load-combine.ll
@@ -318,3 +318,26 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[1]) | (i32) bswap(p[0] << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr w8, [x0]
+; CHECK-NEXT: rev w0, w8
+; CHECK-NEXT: ret
+
+  %tmp = bitcast i32* %arg to i16*
+  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+  %tmp2 = zext i16 %tmp11 to i32
+  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+  %tmp5 = zext i16 %tmp41 to i32
+  %tmp6 = shl nuw nsw i32 %tmp2, 16
+  %tmp7 = or i32 %tmp6, %tmp5
+  ret i32 %tmp7
+}
diff --git a/test/CodeGen/ARM/load-combine-big-endian.ll b/test/CodeGen/ARM/load-combine-big-endian.ll
index 795e69fc4f5..b5a405df967 100644
--- a/test/CodeGen/ARM/load-combine-big-endian.ll
+++ b/test/CodeGen/ARM/load-combine-big-endian.ll
@@ -449,3 +449,37 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[0]) | (i32) bswap(p[1] << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_bswap_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+  %tmp = bitcast i32* %arg to i16*
+  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+  %tmp2 = zext i16 %tmp11 to i32
+  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+  %tmp5 = zext i16 %tmp41 to i32
+  %tmp6 = shl nuw nsw i32 %tmp5, 16
+  %tmp7 = or i32 %tmp6, %tmp2
+  ret i32 %tmp7
+}
diff --git a/test/CodeGen/ARM/load-combine.ll b/test/CodeGen/ARM/load-combine.ll
index bafa13894c6..eb476b673b1 100644
--- a/test/CodeGen/ARM/load-combine.ll
+++ b/test/CodeGen/ARM/load-combine.ll
@@ -407,3 +407,37 @@ define i32 @load_i32_by_i8_neg_offset_bswap(i32* %arg) {
   %tmp18 = or i32 %tmp13, %tmp17
   ret i32 %tmp18
 }
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p; // p is 4 byte aligned
+; (i32) bswap(p[1]) | (i32) bswap(p[0] << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: ldr r0, [r0]
+; CHECK-NEXT: mov r1, #65280
+; CHECK-NEXT: mov r2, #16711680
+; CHECK-NEXT: and r1, r1, r0, lsr #8
+; CHECK-NEXT: and r2, r2, r0, lsl #8
+; CHECK-NEXT: orr r1, r1, r0, lsr #24
+; CHECK-NEXT: orr r0, r2, r0, lsl #24
+; CHECK-NEXT: orr r0, r0, r1
+; CHECK-NEXT: mov pc, lr
+
+; CHECK-ARMv6-LABEL: load_i32_by_bswap_i16:
+; CHECK-ARMv6: ldr r0, [r0]
+; CHECK-ARMv6-NEXT: rev r0, r0
+; CHECK-ARMv6-NEXT: bx lr
+
+  %tmp = bitcast i32* %arg to i16*
+  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+  %tmp2 = zext i16 %tmp11 to i32
+  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+  %tmp5 = zext i16 %tmp41 to i32
+  %tmp6 = shl nuw nsw i32 %tmp2, 16
+  %tmp7 = or i32 %tmp6, %tmp5
+  ret i32 %tmp7
+}
diff --git a/test/CodeGen/X86/load-combine.ll b/test/CodeGen/X86/load-combine.ll
index 8942ca09c14..7c881653057 100644
--- a/test/CodeGen/X86/load-combine.ll
+++ b/test/CodeGen/X86/load-combine.ll
@@ -869,3 +869,35 @@ entry:
   store i64 %conv75, i64* %dst, align 8
   ret void
 }
+
+declare i16 @llvm.bswap.i16(i16)
+
+; i16* p;
+; (i32) bswap(p[1]) | (i32) bswap(p[0] << 16)
+define i32 @load_i32_by_bswap_i16(i32* %arg) {
+; CHECK-LABEL: load_i32_by_bswap_i16:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl (%eax), %eax
+; CHECK-NEXT: bswapl %eax
+; CHECK-NEXT: retl
+;
+; CHECK64-LABEL: load_i32_by_bswap_i16:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: movl (%rdi), %eax
+; CHECK64-NEXT: bswapl %eax
+; CHECK64-NEXT: retq
+
+
+  %tmp = bitcast i32* %arg to i16*
+  %tmp1 = load i16, i16* %tmp, align 4
+  %tmp11 = call i16 @llvm.bswap.i16(i16 %tmp1)
+  %tmp2 = zext i16 %tmp11 to i32
+  %tmp3 = getelementptr inbounds i16, i16* %tmp, i32 1
+  %tmp4 = load i16, i16* %tmp3, align 1
+  %tmp41 = call i16 @llvm.bswap.i16(i16 %tmp4)
+  %tmp5 = zext i16 %tmp41 to i32
+  %tmp6 = shl nuw nsw i32 %tmp2, 16
+  %tmp7 = or i32 %tmp6, %tmp5
+  ret i32 %tmp7
+}