; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

; Instcombine should be able to eliminate all of these ext casts.

; Keeps the truncated value (%b) alive so the trunc itself cannot be deleted,
; forcing instcombine to eliminate only the ext casts in the tests below.
declare void @use(i32)
; trunc+and+zext of the low 4 bits: the mask fits in the narrow type, so both
; casts should fold away into a single i64 'and'.
define i64 @test1(i64 %a) {
  %b = trunc i64 %a to i32
  %c = and i32 %b, 15
  %d = zext i32 %c to i64
  call void @use(i32 %b)
  ret i64 %d
; CHECK-LABEL: @test1(
; CHECK-NOT: ext
; CHECK: ret
}
; shl+ashr in i32 is a sign-extension of the low 28 bits; the whole sequence
; should be rewritten as a wide shl/ashr-exact pair on the original i64.
define i64 @test2(i64 %a) {
  %b = trunc i64 %a to i32
  %c = shl i32 %b, 4
  %q = ashr i32 %c, 4
  %d = sext i32 %q to i64
  call void @use(i32 %b)
  ret i64 %d
; CHECK-LABEL: @test2(
; CHECK: shl i64 %a, 36
; CHECK: %d = ashr exact i64 {{.*}}, 36
; CHECK: ret i64 %d
}
; Same as test1 but with a single-bit mask; the ext casts should vanish.
define i64 @test3(i64 %a) {
  %b = trunc i64 %a to i32
  %c = and i32 %b, 8
  %d = zext i32 %c to i64
  call void @use(i32 %b)
  ret i64 %d
; CHECK-LABEL: @test3(
; CHECK-NOT: ext
; CHECK: ret
}
; and+xor of a constant survives widening: both operations should be performed
; directly in i64 with no ext cast left behind.
define i64 @test4(i64 %a) {
  %b = trunc i64 %a to i32
  %c = and i32 %b, 8
  %x = xor i32 %c, 8
  %d = zext i32 %x to i64
  call void @use(i32 %b)
  ret i64 %d
; CHECK-LABEL: @test4(
; CHECK: = and i64 %a, 8
; CHECK: = xor i64 {{.*}}, 8
; CHECK-NOT: ext
; CHECK: ret
}
; zext to i128, lshr, trunc back to the source width: the shift should be done
; in i32 directly (SRoA "promote to large integer" cleanup pattern).
define i32 @test5(i32 %A) {
  %B = zext i32 %A to i128
  %C = lshr i128 %B, 16
  %D = trunc i128 %C to i32
  ret i32 %D
; CHECK-LABEL: @test5(
; CHECK: %C = lshr i32 %A, 16
; CHECK: ret i32 %C
}
; As test5 but truncating below the source width: the shift narrows to i64 and
; a single trunc to i32 remains.
define i32 @test6(i64 %A) {
  %B = zext i64 %A to i128
  %C = lshr i128 %B, 32
  %D = trunc i128 %C to i32
  ret i32 %D
; CHECK-LABEL: @test6(
; CHECK: %C = lshr i64 %A, 32
; CHECK: %D = trunc i64 %C to i32
; CHECK: ret i32 %D
}
; Non-power-of-two result type: the intermediate i128 should shrink to i92,
; with the zext retargeted and the trunc eliminated.
define i92 @test7(i64 %A) {
  %B = zext i64 %A to i128
  %C = lshr i128 %B, 32
  %D = trunc i128 %C to i92
  ret i92 %D
; CHECK-LABEL: @test7(
; CHECK: %B = zext i64 %A to i92
; CHECK: %C = lshr i92 %B, 32
; CHECK: ret i92 %C
}
; Two values packed into an i128 then truncated to i64: the whole computation
; should narrow to i64, and the shl gains 'nuw' since %tmp32's high bits are 0.
define i64 @test8(i32 %A, i32 %B) {
  %tmp38 = zext i32 %A to i128
  %tmp32 = zext i32 %B to i128
  %tmp33 = shl i128 %tmp32, 32
  %ins35 = or i128 %tmp33, %tmp38
  %tmp42 = trunc i128 %ins35 to i64
  ret i64 %tmp42
; CHECK-LABEL: @test8(
; CHECK: %tmp38 = zext i32 %A to i64
; CHECK: %tmp32 = zext i32 %B to i64
; CHECK: %tmp33 = shl nuw i64 %tmp32, 32
; CHECK: %ins35 = or i64 %tmp33, %tmp38
; CHECK: ret i64 %ins35
}
; Canonicalization: and-then-trunc should become trunc-then-and (the cast is
; hoisted outside the and-with-constant).
define i8 @test9(i32 %X) {
  %Y = and i32 %X, 42
  %Z = trunc i32 %Y to i8
  ret i8 %Z
; CHECK-LABEL: @test9(
; CHECK: trunc
; CHECK: and
; CHECK: ret
}
; rdar://8808586
; Already in canonical trunc-then-and form; instcombine should leave it alone.
define i8 @test10(i32 %X) {
  %Y = trunc i32 %X to i8
  %Z = and i8 %Y, 42
  ret i8 %Z
; CHECK-LABEL: @test10(
; CHECK: trunc
; CHECK: and
; CHECK: ret
}