Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2025-01-31 20:51:52 +01:00)
[X86] Do not generate __multi3 for mul i128 on X86

Summary:
__multi3 is not available on x86 (32-bit). Setting the libcall name for
MUL_I128 to nullptr forces DAGTypeLegalizer::ExpandIntRes_MUL to generate
instructions for a 128-bit multiply instead of a call to an undefined
function. This fixes PR20871, though it may be worth looking at why licm
and indvars combine to generate 65-bit multiplies in that test.

Patch by Riyaz V Puthiyapurayil

Reviewers: craig.topper, schweitz

Reviewed By: craig.topper, schweitz

Subscribers: RKSimon, llvm-commits

Differential Revision: https://reviews.llvm.org/D38668

llvm-svn: 316254
This commit is contained in:
Parent: e08f7dbc57
Commit: 06e8ee7204
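For context, a minimal, self-contained C++ sketch of the mechanism the commit message describes. RTLIB::MUL_I128, setLibcallName/getLibcallName and DAGTypeLegalizer::ExpandIntRes_MUL are the real LLVM names involved, but the class and decision logic below are simplified stand-ins, not the actual LLVM sources: a target constructor nulls out the libcall name, and the integer legalizer falls back to inline expansion when no name is registered.

// Hypothetical, self-contained sketch of the libcall-vs-expand decision.
// It mirrors the idea in the patch: if a target reports no runtime helper
// for 128-bit multiply, the legalizer must expand the operation inline.
#include <cstdio>

namespace RTLIB { enum Libcall { MUL_I128, SHL_I128, NUM_LIBCALLS }; }

class TargetLoweringSketch {
  const char *LibcallNames[RTLIB::NUM_LIBCALLS] = {"__multi3", "__ashlti3"};
public:
  void setLibcallName(RTLIB::Libcall LC, const char *Name) { LibcallNames[LC] = Name; }
  const char *getLibcallName(RTLIB::Libcall LC) const { return LibcallNames[LC]; }
};

// Rough stand-in for the decision made when legalizing an i128 multiply.
void expandMul128(const TargetLoweringSketch &TLI) {
  if (const char *Name = TLI.getLibcallName(RTLIB::MUL_I128))
    std::printf("emit call to %s\n", Name);       // 64-bit x86: __multi3 exists
  else
    std::printf("expand i128 multiply inline\n"); // 32-bit x86 after this patch
}

int main() {
  TargetLoweringSketch TLI;
  expandMul128(TLI);                            // emit call to __multi3
  TLI.setLibcallName(RTLIB::MUL_I128, nullptr); // what the hunk below adds for 32-bit x86
  expandMul128(TLI);                            // expand i128 multiply inline
}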
@@ -1585,6 +1585,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setLibcallName(RTLIB::SHL_I128, nullptr);
     setLibcallName(RTLIB::SRL_I128, nullptr);
     setLibcallName(RTLIB::SRA_I128, nullptr);
+    setLibcallName(RTLIB::MUL_I128, nullptr);
   }
 
   // Combine sin / cos into one node or libcall if possible.
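The inline code the legalizer emits in place of the __multi3 call is the usual schoolbook decomposition of a wide multiply into half-width products. Below is a hedged, standalone C++ illustration of the same idea one size down (a 64x64 -> 128-bit multiply built only from 32-bit half products, analogous to the mull/addl/adcl sequences the updated 32-bit tests check for); U128 and mul64x64 are illustrative names, not LLVM code.

#include <cstdint>
#include <cstdio>

// Multiply two 64-bit values into a 128-bit (hi, lo) pair using only
// 32x32 -> 64-bit multiplies, mirroring the structure of the expansion
// that the legalizer performs for i128 on a 32-bit target.
struct U128 { uint64_t hi, lo; };

U128 mul64x64(uint64_t a, uint64_t b) {
  uint64_t a_lo = static_cast<uint32_t>(a), a_hi = a >> 32;
  uint64_t b_lo = static_cast<uint32_t>(b), b_hi = b >> 32;

  uint64_t ll = a_lo * b_lo;   // low x low
  uint64_t lh = a_lo * b_hi;   // cross terms, weighted by 2^32
  uint64_t hl = a_hi * b_lo;
  uint64_t hh = a_hi * b_hi;   // high x high, weighted by 2^64

  // Sum the 2^32 column, keeping the carry that spills into the high word.
  uint64_t mid = (ll >> 32) + static_cast<uint32_t>(lh) + static_cast<uint32_t>(hl);

  U128 r;
  r.lo = (mid << 32) | static_cast<uint32_t>(ll);
  r.hi = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
  return r;
}

int main() {
  U128 r = mul64x64(0xDEADBEEFCAFEBABEull, 0x0123456789ABCDEFull);
  std::printf("hi=%016llx lo=%016llx\n",
              (unsigned long long)r.hi, (unsigned long long)r.lo);
}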
@@ -22,7 +22,7 @@ define void @test_sret_libcall(i128 %l, i128 %r) {
; CHECK: pushl 72(%esp)
; CHECK: pushl [[SRET_ADDR]]

; CHECK: calll __multi3
; CHECK: calll __udivti3

; CHECK: addl $44, %esp
; CHECK-DAG: movl 8(%esp), [[RES0:%[a-z]+]]
@@ -33,7 +33,7 @@ define void @test_sret_libcall(i128 %l, i128 %r) {
; CHECK-DAG: movl [[RES1]], var+4
; CHECK-DAG: movl [[RES2]], var+8
; CHECK-DAG: movl [[RES3]], var+12
%prod = mul i128 %l, %r
store i128 %prod, i128* @var
%quot = udiv i128 %l, %r
store i128 %quot, i128* @var
ret void
}
[File diff suppressed because it is too large]
@@ -9,180 +9,346 @@ define void @test(i256* %a, i256* %b, i256* %out) #0 {
; X32: # BB#0: # %entry
[~500 autogenerated X32 CHECK lines: the rendered side-by-side diff was
flattened by extraction, interleaving the old checks (which expected repeated
"calll __multi3" sequences for the i256 multiply) with the new checks (which
expect the multiply to be expanded inline into 32-bit mull/imull/addl/adcl
sequences with stack spills and reloads), so the two sides cannot be reliably
separated here.]
[File diff suppressed because it is too large]
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X86
 
 define i128 @foo(i128 %t, i128 %u) {
 ; X64-LABEL: foo:
@@ -12,6 +13,84 @@ define i128 @foo(i128 %t, i128 %u) {
 ; X64-NEXT: imulq %r8, %rsi
 ; X64-NEXT: addq %rsi, %rdx
 ; X64-NEXT: retq
+;
+; X86-LABEL: foo:
+; X86: # BB#0:
+; X86-NEXT: pushl %ebp
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: pushl %ebx
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: pushl %edi
+; X86-NEXT: .cfi_def_cfa_offset 16
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 20
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: .cfi_def_cfa_offset 28
+; X86-NEXT: .cfi_offset %esi, -20
+; X86-NEXT: .cfi_offset %edi, -16
+; X86-NEXT: .cfi_offset %ebx, -12
+; X86-NEXT: .cfi_offset %ebp, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: imull %edx, %esi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: mull %edx
+; X86-NEXT: movl %eax, %ebx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: imull %edi, %ecx
+; X86-NEXT: addl %edx, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl %esi, %ecx
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: imull {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: mull %ebp
+; X86-NEXT: addl %esi, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: imull %ebp, %edi
+; X86-NEXT: addl %edx, %edi
+; X86-NEXT: addl %ebx, %eax
+; X86-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
+; X86-NEXT: adcl %ecx, %edi
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: mull %ecx
+; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: mull %ecx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: addl %ebx, %ecx
+; X86-NEXT: adcl $0, %esi
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: movl %eax, %ebp
+; X86-NEXT: addl %ecx, %ebp
+; X86-NEXT: adcl %esi, %ebx
+; X86-NEXT: setb %cl
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NEXT: addl %ebx, %eax
+; X86-NEXT: movzbl %cl, %ecx
+; X86-NEXT: adcl %ecx, %edx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax # 4-byte Folded Reload
+; X86-NEXT: adcl %edi, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%esp), %esi # 4-byte Reload
+; X86-NEXT: movl %esi, (%ecx)
+; X86-NEXT: movl %ebp, 4(%ecx)
+; X86-NEXT: movl %eax, 8(%ecx)
+; X86-NEXT: movl %edx, 12(%ecx)
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: addl $8, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl $4
 %k = mul i128 %t, %u
 ret i128 %k
 }
@@ -6,39 +6,60 @@ define i128 @mulhioverflow(i64 %a, i64 %b, i64 %c) nounwind {
; X32-LABEL: mulhioverflow:
; X32: # BB#0:
[~85 autogenerated X32 CHECK lines: as above, the extraction interleaves the
old checks (which set up a frame and called __multi3) with the new checks
(which compute the wide product with inline mull/addl/adcl sequences and store
the four 32-bit result words through the sret pointer), so the two sides
cannot be reliably separated here.]
;