mirror of https://github.com/RPCS3/llvm-mirror.git
[x86] Change u8imm operands to always print as unsigned. This makes shuffle masks and the like make way more sense.
llvm-svn: 226902
commit 043ec61ef1
parent 8d442c114d
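The mechanics behind the change are simple: the new printU8Imm printers mask the immediate to its low 8 bits before formatting, so a value the MC layer carries sign-extended (for example -28) is printed as the corresponding unsigned byte (228). Below is a minimal standalone sketch of that arithmetic in plain C++ (the helper name and sample values are illustrative only, not code from this commit):

#include <cstdint>
#include <iostream>

// Hypothetical helper mirroring the "Imm & 0xff" masking done by the new
// printU8Imm methods: keep only the low 8 bits of the operand value.
static uint8_t lowByte(int64_t Imm) {
  return static_cast<uint8_t>(Imm & 0xff);
}

int main() {
  // A shuffle mask carried as the sign-extended value -28 prints as 228,
  // and -64 prints as 192, matching the updated FileCheck lines below.
  std::cout << static_cast<int>(lowByte(-28)) << "\n"; // 228
  std::cout << static_cast<int>(lowByte(-64)) << "\n"; // 192
}

The instruction encodings are untouched; only the printed form changes, which is why the test expectations in the diff flip from $-28 to $228, $-64 to $192, and so on.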
@@ -282,3 +282,10 @@ void X86ATTInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
   O << markup(">");
 }
+
+void X86ATTInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
+                                   raw_ostream &O) {
+  O << markup("<imm:")
+    << '$' << formatImm(MI->getOperand(Op).getImm() & 0xff)
+    << markup(">");
+}
@@ -52,6 +52,7 @@ public:
   void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
   void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &OS);
   void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS);
+  void printU8Imm(const MCInst *MI, unsigned Op, raw_ostream &OS);
 
   void printanymem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
     printMemReference(MI, OpNo, O);
@@ -245,3 +245,8 @@ void X86IntelInstPrinter::printMemOffset(const MCInst *MI, unsigned Op,
   O << ']';
 }
+
+void X86IntelInstPrinter::printU8Imm(const MCInst *MI, unsigned Op,
+                                     raw_ostream &O) {
+  O << formatImm(MI->getOperand(Op).getImm() & 0xff);
+}
@@ -43,6 +43,7 @@ public:
   void printSrcIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printDstIdx(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS);
+  void printU8Imm(const MCInst *MI, unsigned Op, raw_ostream &O);
 
   void printanymem(const MCInst *MI, unsigned OpNo, raw_ostream &O) {
     printMemReference(MI, OpNo, O);
@@ -643,6 +643,7 @@ def i64i8imm : Operand<i64> {
 
 // Unsigned 8-bit immediate used by SSE/AVX instructions.
 def u8imm : Operand<i8> {
+  let PrintMethod = "printU8Imm";
   let ParserMatchClass = ImmUnsignedi8AsmOperand;
   let OperandType = "OPERAND_IMMEDIATE";
 }
@@ -3,7 +3,7 @@
 
 define <4 x float> @f4523(<4 x float> %a,<4 x float> %b) nounwind {
 entry:
-; CHECK: shufps $-28, %xmm
+; CHECK: shufps $228, %xmm
   %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4,i32 5,i32 2,i32 3>
   ret <4 x float> %shuffle
@@ -60,7 +60,7 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
 ; X32: movl 8(%esp), %ecx
 ; CHECK-NOT: mov
 ;; Try to match a bit more of the instr, since we need the load's offset.
-; CHECK: vinsertps $-64, 12(%{{...}},%{{...}}), %
+; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
 ; CHECK-NEXT: ret
   %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
   %2 = load <4 x float>* %1, align 16
@@ -849,13 +849,13 @@ define <4 x float> @insertps_from_vector_load_offset_2(<4 x float> %a, <4 x floa
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X32-NEXT: shll $4, %ecx
-; X32-NEXT: insertps $-64, 12(%eax,%ecx), %xmm0
+; X32-NEXT: insertps $192, 12(%eax,%ecx), %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_from_vector_load_offset_2:
 ; X64: ## BB#0:
 ; X64-NEXT: shlq $4, %rsi
-; X64-NEXT: insertps $-64, 12(%rdi,%rsi), %xmm0
+; X64-NEXT: insertps $192, 12(%rdi,%rsi), %xmm0
 ; X64-NEXT: retq
   %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
   %2 = load <4 x float>* %1, align 16
@@ -986,12 +986,12 @@ define <4 x float> @pr20087(<4 x float> %a, <4 x float> *%ptr) {
 ; X32-LABEL: pr20087:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: insertps $-78, 8(%eax), %xmm0
+; X32-NEXT: insertps $178, 8(%eax), %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: pr20087:
 ; X64: ## BB#0:
-; X64-NEXT: insertps $-78, 8(%rdi), %xmm0
+; X64-NEXT: insertps $178, 8(%rdi), %xmm0
 ; X64-NEXT: retq
   %load = load <4 x float> *%ptr
   %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
@@ -1004,14 +1004,14 @@ define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
-; X32-NEXT: insertps $-36, LCPI49_1+12, %xmm0
+; X32-NEXT: insertps $220, LCPI49_1+12, %xmm0
 ; X32-NEXT: movups %xmm0, (%eax)
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_pr20411:
 ; X64: ## BB#0:
 ; X64-NEXT: movaps {{.*#+}} xmm0 = [3,3,3,3]
-; X64-NEXT: insertps $-36, LCPI49_1+{{.*}}(%rip), %xmm0
+; X64-NEXT: insertps $220, LCPI49_1+{{.*}}(%rip), %xmm0
 ; X64-NEXT: movups %xmm0, (%rdi)
 ; X64-NEXT: retq
   %gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -1459,7 +1459,7 @@ define <4 x double> @stack_fold_shufpd_ymm(<4 x double> %a0, <4 x double> %a1) {
 
 define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
   ;CHECK-LABEL: stack_fold_shufps
-  ;CHECK: vshufps $-56, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  ;CHECK: vshufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
   ret <4 x float> %2
@@ -1467,7 +1467,7 @@ define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
 
 define <8 x float> @stack_fold_shufps_ymm(<8 x float> %a0, <8 x float> %a1) {
   ;CHECK-LABEL: stack_fold_shufps_ymm
-  ;CHECK: vshufps $-108, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  ;CHECK: vshufps $148, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = shufflevector <8 x float> %a0, <8 x float> %a1, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x float> %2
@@ -902,7 +902,7 @@ define <2 x double> @stack_fold_shufpd(<2 x double> %a0, <2 x double> %a1) {
 
 define <4 x float> @stack_fold_shufps(<4 x float> %a0, <4 x float> %a1) {
   ;CHECK-LABEL: stack_fold_shufps
-  ;CHECK: shufps $-56, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  ;CHECK: shufps $200, {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 0, i32 2, i32 4, i32 7>
   ret <4 x float> %2
@@ -45,7 +45,7 @@ define <4 x i32> @test4(<4 x i32> %A, <4 x i32> %B) nounwind {
 define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK-LABEL: test5:
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-79
+; CHECK: pshufd $177
 ; CHECK: pand
 ; CHECK: ret
   %C = icmp eq <2 x i64> %A, %B
@@ -56,7 +56,7 @@ define <2 x i64> @test5(<2 x i64> %A, <2 x i64> %B) nounwind {
 define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK-LABEL: test6:
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-79
+; CHECK: pshufd $177
 ; CHECK: pand
 ; CHECK: pcmpeqd
 ; CHECK: pxor
@@ -77,11 +77,11 @@ define <2 x i64> @test7(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor [[CONSTREG]]
 ; CHECK: pxor [[CONSTREG]]
 ; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: ret
   %C = icmp sgt <2 x i64> %A, %B
@@ -94,11 +94,11 @@ define <2 x i64> @test8(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor
 ; CHECK: pxor
 ; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: ret
   %C = icmp slt <2 x i64> %A, %B
@@ -111,11 +111,11 @@ define <2 x i64> @test9(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor
 ; CHECK: pxor
 ; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: pcmpeqd
 ; CHECK: pxor
@@ -130,11 +130,11 @@ define <2 x i64> @test10(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor
 ; CHECK: pxor
 ; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: pcmpeqd
 ; CHECK: pxor
@@ -155,11 +155,11 @@ define <2 x i64> @test11(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor [[CONSTREG]]
 ; CHECK: pxor [[CONSTREG]]
 ; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: ret
   %C = icmp ugt <2 x i64> %A, %B
@@ -172,11 +172,11 @@ define <2 x i64> @test12(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor
 ; CHECK: pxor
 ; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: ret
   %C = icmp ult <2 x i64> %A, %B
@@ -189,11 +189,11 @@ define <2 x i64> @test13(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor
 ; CHECK: pxor
 ; CHECK: pcmpgtd %xmm0
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: pcmpeqd
 ; CHECK: pxor
@@ -208,11 +208,11 @@ define <2 x i64> @test14(<2 x i64> %A, <2 x i64> %B) nounwind {
 ; CHECK: pxor
 ; CHECK: pxor
 ; CHECK: pcmpgtd %xmm1
-; CHECK: pshufd $-96
+; CHECK: pshufd $160
 ; CHECK: pcmpeqd
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: pand
-; CHECK: pshufd $-11
+; CHECK: pshufd $245
 ; CHECK: por
 ; CHECK: pcmpeqd
 ; CHECK: pxor
@@ -313,20 +313,20 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
 ; SSE2-LABEL: load_zext_4i32_to_4i64:
 ; SSE2: # BB#0: # %entry
 ; SSE2-NEXT: movdqa (%rdi), %xmm1
-; SSE2-NEXT: pshufd $-44, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
+; SSE2-NEXT: pshufd $212, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
 ; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pshufd $-6, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
+; SSE2-NEXT: pshufd $250, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
 ; SSE2-NEXT: pand %xmm2, %xmm1
 ; SSE2-NEXT: retq
 
 ; SSSE3-LABEL: load_zext_4i32_to_4i64:
 ; SSSE3: # BB#0: # %entry
 ; SSSE3-NEXT: movdqa (%rdi), %xmm1
-; SSSE3-NEXT: pshufd $-44, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
+; SSSE3-NEXT: pshufd $212, %xmm1, %xmm0 # xmm0 = xmm1[0,1,1,3]
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
 ; SSSE3-NEXT: pand %xmm2, %xmm0
-; SSSE3-NEXT: pshufd $-6, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
+; SSSE3-NEXT: pshufd $250, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
 ; SSSE3-NEXT: pand %xmm2, %xmm1
 ; SSSE3-NEXT: retq
 
@@ -336,7 +336,7 @@ define <4 x i64> @load_zext_4i32_to_4i64(<4 x i32> *%ptr) {
 ; SSE41-NEXT: pmovzxdq %xmm1, %xmm0
 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
 ; SSE41-NEXT: pand %xmm2, %xmm0
-; SSE41-NEXT: pshufd $-6, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
+; SSE41-NEXT: pshufd $250, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
 ; SSE41-NEXT: pand %xmm2, %xmm1
 ; SSE41-NEXT: retq
 
@@ -3352,7 +3352,7 @@
 // CHECK: vinsertps $129, %xmm3, %xmm2, %xmm1
 vinsertps $0x81, %xmm3, %xmm2, %xmm1
 
-// CHECK: vcmpps $-128, %xmm2, %xmm1, %xmm0
+// CHECK: vcmpps $128, %xmm2, %xmm1, %xmm0
 // CHECK: encoding: [0xc5,0xf0,0xc2,0xc2,0x80]
 vcmpps $-128, %xmm2, %xmm1, %xmm0
 
@@ -19622,31 +19622,31 @@
 // immediate. Check both forms here.
 // CHECK: blendps $129, %xmm2, %xmm1
 blendps $0x81, %xmm2, %xmm1
-// CHECK: blendps $-64, %xmm2, %xmm1
+// CHECK: blendps $192, %xmm2, %xmm1
 blendps $-64, %xmm2, %xmm1
 // CHECK: blendpd $129, %xmm2, %xmm1
 blendpd $0x81, %xmm2, %xmm1
-// CHECK: blendpd $-64, %xmm2, %xmm1
+// CHECK: blendpd $192, %xmm2, %xmm1
 blendpd $-64, %xmm2, %xmm1
 // CHECK: pblendw $129, %xmm2, %xmm1
 pblendw $0x81, %xmm2, %xmm1
-// CHECK: pblendw $-64, %xmm2, %xmm1
+// CHECK: pblendw $192, %xmm2, %xmm1
 pblendw $-64, %xmm2, %xmm1
 // CHECK: mpsadbw $129, %xmm2, %xmm1
 mpsadbw $0x81, %xmm2, %xmm1
-// CHECK: mpsadbw $-64, %xmm2, %xmm1
+// CHECK: mpsadbw $192, %xmm2, %xmm1
 mpsadbw $-64, %xmm2, %xmm1
 // CHECK: dpps $129, %xmm2, %xmm1
 dpps $0x81, %xmm2, %xmm1
-// CHECK: dpps $-64, %xmm2, %xmm1
+// CHECK: dpps $192, %xmm2, %xmm1
 dpps $-64, %xmm2, %xmm1
 // CHECK: dppd $129, %xmm2, %xmm1
 dppd $0x81, %xmm2, %xmm1
-// CHECK: dppd $-64, %xmm2, %xmm1
+// CHECK: dppd $192, %xmm2, %xmm1
 dppd $-64, %xmm2, %xmm1
 // CHECK: insertps $129, %xmm2, %xmm1
 insertps $0x81, %xmm2, %xmm1
-// CHECK: insertps $-64, %xmm2, %xmm1
+// CHECK: insertps $192, %xmm2, %xmm1
 insertps $-64, %xmm2, %xmm1
 
 // PR13253 handle implicit optional third argument that must always be xmm0