
[x86] avoid printing unnecessary sign bits of hex immediates in asm comments (PR20347)

Ideally, we would check the valid/expected size of the immediate operand, but this is still
generally better than what we print right now. For example, the 16-bit immediate in
'movw $-4096, %ax' was previously annotated as 'imm = 0xFFFFFFFFFFFFF000'; it now prints as 'imm = 0xF000'.

Differential Revision: http://reviews.llvm.org/D20385

llvm-svn: 271114
Commit fc1048d379 (parent 396b16d3af)
Author: Sanjay Patel, 2016-05-28 14:58:37 +00:00
12 changed files with 79 additions and 70 deletions


@ -165,16 +165,25 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
printRegName(O, Op.getReg());
} else if (Op.isImm()) {
// Print X86 immediates as signed values.
// Print immediates as signed values.
int64_t Imm = Op.getImm();
O << markup("<imm:") << '$' << formatImm(Imm) << markup(">");
// TODO: This should be in a helper function in the base class, so it can
// be used by other printers.
// If there are no instruction-specific comments, add a comment clarifying
// the hex value of the immediate operand when it isn't in the range
// [-256,255].
if (CommentStream && !HasCustomInstComment && (Imm > 255 || Imm < -256))
*CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
if (CommentStream && !HasCustomInstComment && (Imm > 255 || Imm < -256)) {
// Don't print unnecessary hex sign bits.
if (Imm == (int16_t)(Imm))
*CommentStream << format("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
else if (Imm == (int32_t)(Imm))
*CommentStream << format("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
else
*CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
}
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
O << markup("<imm:") << '$';

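For reference, below is a minimal standalone sketch (not part of the patch; the helper name printImmComment and the driver are hypothetical) of the same narrowing check, assuming only the standard C format macros: the immediate is printed with the smallest width whose sign-extension reproduces the original value. The sample values are taken from the updated tests in this commit.

// Standalone illustration of the narrowing logic in the hunk above.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Print the hex comment for a signed immediate using the smallest width
// that round-trips through a signed truncation.
static void printImmComment(int64_t Imm) {
  if (Imm == (int16_t)Imm)
    std::printf("imm = 0x%" PRIX16 "\n", (uint16_t)Imm);
  else if (Imm == (int32_t)Imm)
    std::printf("imm = 0x%" PRIX32 "\n", (uint32_t)Imm);
  else
    std::printf("imm = 0x%" PRIX64 "\n", (uint64_t)Imm);
}

int main() {
  printImmComment(-4096);        // imm = 0xF000 (previously 0xFFFFFFFFFFFFF000)
  printImmComment(-1082130432);  // imm = 0xBF800000
  printImmComment(-1840700269);  // imm = 0x92492493
  printImmComment(INT64_MIN);    // imm = 0x8000000000000000
  return 0;
}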

@ -42,7 +42,7 @@ entry:
define void @mp_11193(<8 x float> * nocapture %aFOO, <8 x float>* nocapture %RET) nounwind {
; CHECK-LABEL: mp_11193:
; CHECK: # BB#0: # %allocas
; CHECK-NEXT: movl $-1082130432, (%rsi) # imm = 0xFFFFFFFFBF800000
; CHECK-NEXT: movl $-1082130432, (%rsi) # imm = 0xBF800000
; CHECK-NEXT: retq
allocas:
%bincmp = fcmp olt <8 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 9.000000e+00, float 1.000000e+00, float 9.000000e+00, float 1.000000e+00> , <float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00, float 3.000000e+00>


@ -122,7 +122,7 @@ define i32 @test8(i32 %a1, i32 %a2, i32 %a3) {
; ALL-NEXT: testl %edx, %edx
; ALL-NEXT: movl $1, %eax
; ALL-NEXT: cmovel %eax, %edx
; ALL-NEXT: cmpl $-2147483648, %esi ## imm = 0xFFFFFFFF80000000
; ALL-NEXT: cmpl $-2147483648, %esi ## imm = 0x80000000
; ALL-NEXT: cmovnel %edx, %eax
; ALL-NEXT: cmpl $-1, %edi
; ALL-NEXT: cmovnel %edx, %eax


@ -26,7 +26,7 @@ define i8 @g(i8 %a) {
define <2 x i16> @fold_v2i16() {
; CHECK-LABEL: fold_v2i16:
; CHECK: # BB#0:
; CHECK-NEXT: movw $-4096, %ax # imm = 0xFFFFFFFFFFFFF000
; CHECK-NEXT: movw $-4096, %ax # imm = 0xF000
; CHECK-NEXT: movw $240, %dx
; CHECK-NEXT: retl
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> <i16 15, i16 3840>)


@ -6,7 +6,7 @@ define i64 @foo(i32 %sum) {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: shrl $2, %eax
; CHECK-NEXT: orl $-67108864, %eax # imm = 0xFFFFFFFFFC000000
; CHECK-NEXT: orl $-67108864, %eax # imm = 0xFC000000
; CHECK-NEXT: movl $1073741823, %edx # imm = 0x3FFFFFFF
; CHECK-NEXT: retl
entry:


@ -5,7 +5,7 @@ define i32 @test1(i32 %X) {
; CHECK-LABEL: test1:
; CHECK: # BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl $-2139062143, %edx # imm = 0xFFFFFFFF80808081
; CHECK-NEXT: movl $-2139062143, %edx # imm = 0x80808081
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: imull %edx
; CHECK-NEXT: addl %ecx, %edx
@ -44,7 +44,7 @@ define i32 @test3(i32 %X) {
; CHECK-LABEL: test3:
; CHECK: # BB#0:
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl $-2139062143, %edx # imm = 0xFFFFFFFF80808081
; CHECK-NEXT: movl $-2139062143, %edx # imm = 0x80808081
; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: mull %edx
; CHECK-NEXT: shrl $7, %edx


@ -16,7 +16,7 @@ define i32 @sad_16i8() nounwind {
; SSE2-NEXT: andq $-64, %rsp
; SSE2-NEXT: subq $128, %rsp
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: pxor %xmm2, %xmm2
@ -58,7 +58,7 @@ define i32 @sad_16i8() nounwind {
; AVX2-NEXT: andq $-64, %rsp
; AVX2-NEXT: subq $128, %rsp
; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: .p2align 4, 0x90
; AVX2-NEXT: .LBB0_1: # %vector.body
@ -89,7 +89,7 @@ define i32 @sad_16i8() nounwind {
; AVX512F-LABEL: sad_16i8:
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpxord %zmm0, %zmm0, %zmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: .p2align 4, 0x90
; AVX512F-NEXT: .LBB0_1: # %vector.body
; AVX512F-NEXT: # =>This Inner Loop Header: Depth=1
@ -116,7 +116,7 @@ define i32 @sad_16i8() nounwind {
; AVX512BW-LABEL: sad_16i8:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpxord %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: .p2align 4, 0x90
; AVX512BW-NEXT: .LBB0_1: # %vector.body
; AVX512BW-NEXT: # =>This Inner Loop Header: Depth=1
@ -180,7 +180,7 @@ define i32 @sad_32i8() nounwind {
; SSE2-LABEL: sad_32i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: pxor %xmm12, %xmm12
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm0, %xmm0
@ -316,7 +316,7 @@ define i32 @sad_32i8() nounwind {
; AVX2-NEXT: andq $-128, %rsp
; AVX2-NEXT: subq $256, %rsp # imm = 0x100
; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
@ -358,7 +358,7 @@ define i32 @sad_32i8() nounwind {
; AVX512F-NEXT: andq $-128, %rsp
; AVX512F-NEXT: subq $256, %rsp # imm = 0x100
; AVX512F-NEXT: vpxord %zmm0, %zmm0, %zmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512F-NEXT: .p2align 4, 0x90
; AVX512F-NEXT: .LBB1_1: # %vector.body
@ -397,7 +397,7 @@ define i32 @sad_32i8() nounwind {
; AVX512BW-NEXT: andq $-128, %rsp
; AVX512BW-NEXT: subq $256, %rsp # imm = 0x100
; AVX512BW-NEXT: vpxord %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512BW-NEXT: .p2align 4, 0x90
; AVX512BW-NEXT: .LBB1_1: # %vector.body
@ -472,7 +472,7 @@ define i32 @sad_avx64i8() nounwind {
; SSE2: # BB#0: # %entry
; SSE2-NEXT: subq $232, %rsp
; SSE2-NEXT: pxor %xmm8, %xmm8
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: pxor %xmm5, %xmm5
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
@ -764,7 +764,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX2-LABEL: sad_avx64i8:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpxor %ymm0, %ymm0, %ymm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
@ -840,7 +840,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512F-LABEL: sad_avx64i8:
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpxord %zmm0, %zmm0, %zmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: vpxord %zmm1, %zmm1, %zmm1
; AVX512F-NEXT: vpxord %zmm2, %zmm2, %zmm2
; AVX512F-NEXT: vpxord %zmm3, %zmm3, %zmm3
@ -893,7 +893,7 @@ define i32 @sad_avx64i8() nounwind {
; AVX512BW-NEXT: andq $-256, %rsp
; AVX512BW-NEXT: subq $512, %rsp # imm = 0x200
; AVX512BW-NEXT: vpxord %zmm0, %zmm0, %zmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: vpxord %zmm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpxord %zmm3, %zmm3, %zmm3
; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
@ -976,7 +976,7 @@ define i32 @sad_2i8() nounwind {
; SSE2-LABEL: sad_2i8:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: pxor %xmm0, %xmm0
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
; SSE2-NEXT: movl $65535, %ecx # imm = 0xFFFF
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: .p2align 4, 0x90
@ -999,7 +999,7 @@ define i32 @sad_2i8() nounwind {
; AVX2-LABEL: sad_2i8:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: .p2align 4, 0x90
; AVX2-NEXT: .LBB3_1: # %vector.body
@ -1021,7 +1021,7 @@ define i32 @sad_2i8() nounwind {
; AVX512F-LABEL: sad_2i8:
; AVX512F: # BB#0: # %entry
; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: .p2align 4, 0x90
; AVX512F-NEXT: .LBB3_1: # %vector.body
@ -1043,7 +1043,7 @@ define i32 @sad_2i8() nounwind {
; AVX512BW-LABEL: sad_2i8:
; AVX512BW: # BB#0: # %entry
; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFFFFFFFFFFFFFC00
; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: .p2align 4, 0x90
; AVX512BW-NEXT: .LBB3_1: # %vector.body


@ -1411,7 +1411,7 @@ define void @test_MM_SET_EXCEPTION_MASK(i32 %a0) nounwind {
; X32-NEXT: leal (%esp), %ecx
; X32-NEXT: stmxcsr (%ecx)
; X32-NEXT: movl (%esp), %edx
; X32-NEXT: andl $-8065, %edx # imm = 0xFFFFFFFFFFFFE07F
; X32-NEXT: andl $-8065, %edx # imm = 0xE07F
; X32-NEXT: orl %eax, %edx
; X32-NEXT: movl %edx, (%esp)
; X32-NEXT: ldmxcsr (%ecx)
@ -1423,7 +1423,7 @@ define void @test_MM_SET_EXCEPTION_MASK(i32 %a0) nounwind {
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
; X64-NEXT: andl $-8065, %ecx # imm = 0xFFFFFFFFFFFFE07F
; X64-NEXT: andl $-8065, %ecx # imm = 0xE07F
; X64-NEXT: orl %edi, %ecx
; X64-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
; X64-NEXT: ldmxcsr (%rax)
@ -1484,7 +1484,7 @@ define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
; X32-NEXT: leal (%esp), %ecx
; X32-NEXT: stmxcsr (%ecx)
; X32-NEXT: movl (%esp), %edx
; X32-NEXT: andl $-32769, %edx # imm = 0xFFFFFFFFFFFF7FFF
; X32-NEXT: andl $-32769, %edx # imm = 0xFFFF7FFF
; X32-NEXT: orl %eax, %edx
; X32-NEXT: movl %edx, (%esp)
; X32-NEXT: ldmxcsr (%ecx)
@ -1496,7 +1496,7 @@ define void @test_MM_SET_FLUSH_ZERO_MODE(i32 %a0) nounwind {
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
; X64-NEXT: andl $-32769, %ecx # imm = 0xFFFFFFFFFFFF7FFF
; X64-NEXT: andl $-32769, %ecx # imm = 0xFFFF7FFF
; X64-NEXT: orl %edi, %ecx
; X64-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
; X64-NEXT: ldmxcsr (%rax)
@ -1564,7 +1564,7 @@ define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
; X32-NEXT: leal (%esp), %ecx
; X32-NEXT: stmxcsr (%ecx)
; X32-NEXT: movl (%esp), %edx
; X32-NEXT: andl $-24577, %edx # imm = 0xFFFFFFFFFFFF9FFF
; X32-NEXT: andl $-24577, %edx # imm = 0x9FFF
; X32-NEXT: orl %eax, %edx
; X32-NEXT: movl %edx, (%esp)
; X32-NEXT: ldmxcsr (%ecx)
@ -1576,7 +1576,7 @@ define void @test_MM_SET_ROUNDING_MODE(i32 %a0) nounwind {
; X64-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; X64-NEXT: stmxcsr (%rax)
; X64-NEXT: movl -{{[0-9]+}}(%rsp), %ecx
; X64-NEXT: andl $-24577, %ecx # imm = 0xFFFFFFFFFFFF9FFF
; X64-NEXT: andl $-24577, %ecx # imm = 0x9FFF
; X64-NEXT: orl %edi, %ecx
; X64-NEXT: movl %ecx, -{{[0-9]+}}(%rsp)
; X64-NEXT: ldmxcsr (%rax)


@ -58,7 +58,7 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
; X32-SSE1-NEXT: movl %esp, %ebp
; X32-SSE1-NEXT: andl $-16, %esp
; X32-SSE1-NEXT: subl $32, %esp
; X32-SSE1-NEXT: movl $-2147483648, %eax # imm = 0xFFFFFFFF80000000
; X32-SSE1-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE1-NEXT: movl 12(%ebp), %ecx
; X32-SSE1-NEXT: xorl %eax, %ecx
; X32-SSE1-NEXT: movl %ecx, {{[0-9]+}}(%esp)
@ -71,7 +71,7 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
;
; X32-SSE2-LABEL: fneg_bitcast:
; X32-SSE2: # BB#0:
; X32-SSE2-NEXT: movl $-2147483648, %eax # imm = 0xFFFFFFFF80000000
; X32-SSE2-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-SSE2-NEXT: xorl %eax, %ecx
; X32-SSE2-NEXT: xorl {{[0-9]+}}(%esp), %eax


@ -615,7 +615,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; SSE-NEXT: orq %rcx, %rdx
; SSE-NEXT: movq %rdi, %rcx
; SSE-NEXT: shrq %rcx
; SSE-NEXT: andl $-2147483648, %ecx # imm = 0xFFFFFFFF80000000
; SSE-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; SSE-NEXT: orq %rdx, %rcx
; SSE-NEXT: movq %rdi, %rdx
; SSE-NEXT: shrq $3, %rdx
@ -871,7 +871,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; AVX-NEXT: orq %rcx, %rdx
; AVX-NEXT: movq %rdi, %rcx
; AVX-NEXT: shrq %rcx
; AVX-NEXT: andl $-2147483648, %ecx # imm = 0xFFFFFFFF80000000
; AVX-NEXT: andl $-2147483648, %ecx # imm = 0x80000000
; AVX-NEXT: orq %rdx, %rcx
; AVX-NEXT: movq %rdi, %rdx
; AVX-NEXT: shrq $3, %rdx


@ -87,7 +87,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrd $1, %xmm1, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -96,7 +96,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: vmovd %xmm1, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %edx, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -107,7 +107,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX-NEXT: vpextrd $2, %xmm1, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -117,7 +117,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX-NEXT: vpextrd $3, %xmm1, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -128,7 +128,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vextracti32x4 $2, %zmm0, %xmm2
; AVX-NEXT: vpextrd $1, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -137,7 +137,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: vmovd %xmm2, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %edx, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -148,7 +148,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $2, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -158,7 +158,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $3, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -170,7 +170,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
; AVX-NEXT: vpextrd $1, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -179,7 +179,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: vmovd %xmm2, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %edx, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -190,7 +190,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $2, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -200,7 +200,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $3, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -210,7 +210,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $3, %eax, %xmm3, %xmm2
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -219,7 +219,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %edx, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -230,7 +230,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -240,7 +240,7 @@ define <16 x i32> @test_div7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %ecx, %eax
; AVX-NEXT: movl %eax, %ecx
@ -1162,7 +1162,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vextracti32x4 $3, %zmm0, %xmm1
; AVX-NEXT: vpextrd $1, %xmm1, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1174,7 +1174,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: subl %edx, %eax
; AVX-NEXT: vmovd %xmm1, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %ecx, %edx
; AVX-NEXT: movl %edx, %esi
@ -1188,7 +1188,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX-NEXT: vpextrd $2, %xmm1, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1201,7 +1201,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX-NEXT: vpextrd $3, %xmm1, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1215,7 +1215,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vextracti32x4 $2, %zmm0, %xmm2
; AVX-NEXT: vpextrd $1, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1227,7 +1227,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: subl %edx, %eax
; AVX-NEXT: vmovd %xmm2, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %ecx, %edx
; AVX-NEXT: movl %edx, %esi
@ -1241,7 +1241,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $2, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1254,7 +1254,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $3, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1269,7 +1269,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vextracti32x4 $1, %zmm0, %xmm2
; AVX-NEXT: vpextrd $1, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1281,7 +1281,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: subl %edx, %eax
; AVX-NEXT: vmovd %xmm2, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %ecx, %edx
; AVX-NEXT: movl %edx, %esi
@ -1295,7 +1295,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $2, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1308,7 +1308,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $3, %xmm2, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1321,7 +1321,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $3, %eax, %xmm3, %xmm2
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1333,7 +1333,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: subl %edx, %eax
; AVX-NEXT: vmovd %xmm0, %ecx
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rdx
; AVX-NEXT: addl %ecx, %edx
; AVX-NEXT: movl %edx, %esi
@ -1347,7 +1347,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx
@ -1360,7 +1360,7 @@ define <16 x i32> @test_rem7_16i32(<16 x i32> %a) nounwind {
; AVX-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: cltq
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
; AVX-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0x92492493
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: movl %ecx, %edx


@ -18,10 +18,10 @@ movabsq $-9223372036854775808, %rax
# CHECK: movb $-128, %al
# CHECK: movw $32767, %ax # imm = 0x7FFF
# CHECK: movw $-32768, %ax # imm = 0xFFFFFFFFFFFF8000
# CHECK: movw $-32768, %ax # imm = 0x8000
# CHECK: movl $2147483647, %eax # imm = 0x7FFFFFFF
# CHECK: movl $-2147483648, %eax # imm = 0xFFFFFFFF80000000
# CHECK: movl $-2147483648, %eax # imm = 0x80000000
# CHECK: movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
# CHECK: movabsq $-9223372036854775808, %rax # imm = 0x8000000000000000