mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-25 14:02:52 +02:00
Commit Graph

263 Commits

Author SHA1 Message Date
Chris Lattner
9bb0083d16 Remove support for llvm.isnan. Alkis wins :)
llvm-svn: 14189
2004-06-15 21:48:07 +00:00
Chris Lattner
d11493d8c4 Add basic support for the isunordered intrinsic. The isnan stuff still needs to go
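
As an illustrative sketch (not part of this checkin), a call to the new
intrinsic in the IR syntax of the time would look something like:

        %C = call bool %llvm.isunordered(double %X, double %Y)
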
llvm-svn: 14185
2004-06-15 21:36:44 +00:00
Chris Lattner
3a8e675c03 By far, one of the most common uses of isnan is to make 'isunordered'
comparisons.  In an 'isunordered' predicate, which looks like this at
the LLVM level:

        %a = call bool %llvm.isnan(double %X)
        %b = call bool %llvm.isnan(double %Y)
        %COM = or bool %a, %b

We used to generate this code:

        fxch %ST(1)
        fucomip %ST(0), %ST(0)
        setp %AL
        fucomip %ST(0), %ST(0)
        setp %AH
        or %AL, %AH

With this patch, we generate this code:

        fucomip %ST(0), %ST(1)
        fstp %ST(0)
        setp %AL

Which should make Alkis happy.  Tested as X86/compare_folding.llx:test1

llvm-svn: 14148
2004-06-11 05:33:49 +00:00
Chris Lattner
a8e603b719 Now that compare instructions aren't lumped in with the other TwoArgFP instructions,
we can get rid of the FpUCOM/FpUCOMi pseudo instructions, which makes stuff simpler
and faster.

llvm-svn: 14144
2004-06-11 04:49:02 +00:00
Chris Lattner
edb06042b9 Add direct support for the isnan intrinsic, implementing the
test/Regression/CodeGen/X86/isnan.llx testcase

llvm-svn: 14141
2004-06-11 04:31:10 +00:00
John Criswell
287e3fc88b Fix for PR#366. We use getClassB() so that we can handle cast instructions
that cast to bool.
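
For illustration only (names hypothetical, not the PR#366 testcase), the kind
of cast this covers is:

        %B = cast int %X to bool        ; %B is true iff %X is non-zero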

llvm-svn: 14096
2004-06-09 15:18:51 +00:00
Chris Lattner
5ad9eaab1a Convert to the new TargetMachine interface.
llvm-svn: 13952
2004-06-02 05:55:25 +00:00
Chris Lattner
85f19c7b3f Add some notes to myself, no functional changes
llvm-svn: 13695
2004-05-23 21:23:12 +00:00
Brian Gaeke
a17301ca8b Generate branch machine instructions with MachineBasicBlock operands instead of
LLVM BasicBlock operands.

llvm-svn: 13566
2004-05-14 06:54:56 +00:00
Chris Lattner
269da7901a Two more improvements for null pointer handling: storing a null pointer
and passing a null pointer into a function.

For this testcase:

void %test(int** %X) {
  store int* null, int** %X
  call void %test(int** null)
  ret void
}

we now generate this:

test:
        sub %ESP, 12
        mov %EAX, DWORD PTR [%ESP + 16]
        mov DWORD PTR [%EAX], 0
        mov DWORD PTR [%ESP], 0
        call test
        add %ESP, 12
        ret

instead of this:

test:
        sub %ESP, 12
        mov %EAX, DWORD PTR [%ESP + 16]
        mov %ECX, 0
        mov DWORD PTR [%EAX], %ECX
        mov %EAX, 0
        mov DWORD PTR [%ESP], %EAX
        call test
        add %ESP, 12
        ret

llvm-svn: 13558
2004-05-13 15:26:48 +00:00
Chris Lattner
dc8e8484e5 Second half of my fixed-sized-alloca patch. This folds the LEA that computes
the alloca address into common operations like loads/stores.

In a simple testcase like this (which is just designed to exercise the
alloca A, nothing more):

int %test(int %X, bool %C) {
        %A = alloca int
        store int %X, int* %A
        store int* %A, int** %G
        br bool %C, label %T, label %F
T:
        call int %test(int 1, bool false)
        %V = load int* %A
        ret int %V
F:
        call int %test(int 123, bool true)
        %V2 = load int* %A
        ret int %V2
}

We now generate:

test:
        sub %ESP, 12
        mov %EAX, DWORD PTR [%ESP + 16]
        mov %CL, BYTE PTR [%ESP + 20]
***     mov DWORD PTR [%ESP + 8], %EAX
        mov %EAX, OFFSET G
        lea %EDX, DWORD PTR [%ESP + 8]
        mov DWORD PTR [%EAX], %EDX
        test %CL, %CL
        je .LBB2 # PC rel: F
.LBB1:  # T
        mov DWORD PTR [%ESP], 1
        mov DWORD PTR [%ESP + 4], 0
        call test
***     mov %EAX, DWORD PTR [%ESP + 8]
        add %ESP, 12
        ret
.LBB2:  # F
        mov DWORD PTR [%ESP], 123
        mov DWORD PTR [%ESP + 4], 1
        call test
***     mov %EAX, DWORD PTR [%ESP + 8]
        add %ESP, 12
        ret

Instead of:

test:
        sub %ESP, 20
        mov %EAX, DWORD PTR [%ESP + 24]
        mov %CL, BYTE PTR [%ESP + 28]
***     lea %EDX, DWORD PTR [%ESP + 16]
***     mov DWORD PTR [%EDX], %EAX
        mov %EAX, OFFSET G
        mov DWORD PTR [%EAX], %EDX
        test %CL, %CL
***     mov DWORD PTR [%ESP + 12], %EDX
        je .LBB2 # PC rel: F
.LBB1:  # T
        mov DWORD PTR [%ESP], 1
        mov %EAX, 0
        mov DWORD PTR [%ESP + 4], %EAX
        call test
***     mov %EAX, DWORD PTR [%ESP + 12]
***     mov %EAX, DWORD PTR [%EAX]
        add %ESP, 20
        ret
.LBB2:  # F
        mov DWORD PTR [%ESP], 123
        mov %EAX, 1
        mov DWORD PTR [%ESP + 4], %EAX
        call test
***     mov %EAX, DWORD PTR [%ESP + 12]
***     mov %EAX, DWORD PTR [%EAX]
        add %ESP, 20
        ret

llvm-svn: 13557
2004-05-13 15:12:43 +00:00
Chris Lattner
94de563118 Substantially improve code generation for address exposed locals (aka fixed
sized allocas in the entry block).  Instead of generating code like this:

entry:
  reg1024 = ESP+1234
... (much later)
  *reg1024 = 17


Generate code that looks like this:
entry:
  (no code generated)
... (much later)
  t = ESP+1234
  *t = 17

The advantage being that we DRAMATICALLY reduce the register pressure for these
silly temporaries (they were all being spilled to the stack, resulting in very
silly code).  This is actually a manual implementation of rematerialization :)

I have a patch to fold the alloca address computation into loads & stores, which
will make this much better still, but just getting this right took way too much time
and I'm sleepy.

llvm-svn: 13554
2004-05-13 07:40:27 +00:00
Chris Lattner
a19bb14155 Pass boolean constants into function calls more efficiently, generating:
        mov DWORD PTR [%ESP + 4], 1

instead of:

        mov %EAX, 1
        mov DWORD PTR [%ESP + 4], %EAX

llvm-svn: 13494
2004-05-12 16:35:04 +00:00
Chris Lattner
a407338e12 Fix a fairly serious pessimization that was preventing us from efficiently
compiling things like 'add long %X, 1'.  The problem is that we were switching
the order of the operands for longs even though we can't fold them yet.

llvm-svn: 13451
2004-05-10 15:15:55 +00:00
Chris Lattner
0962db8f10 Fix some comments; avoid sign-extending booleans when zero extension works fine
llvm-svn: 13440
2004-05-09 23:16:33 +00:00
Chris Lattner
d18c637a37 Generate more efficient code for casting booleans to integers (no sign extension required)
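
An illustrative sketch of the difference (register choices assumed, not taken
from the checkin):

        movzx %EAX, %AL

instead of:

        movsx %EAX, %AL
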
llvm-svn: 13439
2004-05-09 22:28:45 +00:00
Chris Lattner
67c21e74ec Codegen floating point stores of constants into integer instructions. This
allows us to compile:

store float 10.0, float* %P

into:
        mov DWORD PTR [%EAX], 1092616192

instead of:

.CPItest_0:                                     # float 0x4024000000000000
.long   1092616192      # float 10
...
        fld DWORD PTR [.CPItest_0]
        fstp DWORD PTR [%EAX]

llvm-svn: 13409
2004-05-07 21:18:15 +00:00
Chris Lattner
2021030378 Make comparisons against the null pointer as efficient as integer comparisons
against zero.  In particular, don't emit:

        mov %ESI, 0
        cmp %ECX, %ESI

instead, emit:

       test %ECX, %ECX

llvm-svn: 13407
2004-05-07 19:55:55 +00:00
Chris Lattner
42e602b94f Remove unneeded check
llvm-svn: 13355
2004-05-04 19:35:11 +00:00
Chris Lattner
dac54ebbee Improve signed division by power of 2 *dramatically* from this:
div:
        mov %EDX, DWORD PTR [%ESP + 4]
        mov %ECX, 64
        mov %EAX, %EDX
        sar %EDX, 31
        idiv %ECX
        ret

to this:

div:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %ECX, %EAX
        sar %ECX, 5
        shr %ECX, 26
        mov %EDX, %EAX
        add %EDX, %ECX
        sar %EAX, 6
        ret

Note that the Intel compiler is currently making this:

div:
        movl      4(%esp), %edx                                 #3.5
        movl      %edx, %eax                                    #4.14
        sarl      $5, %eax                                      #4.14
        shrl      $26, %eax                                     #4.14
        addl      %edx, %eax                                    #4.14
        sarl      $6, %eax                                      #4.14
        ret                                                     #4.14

Which has one less register->register copy.  (hint hint, Alkis :)

llvm-svn: 13354
2004-05-04 19:33:58 +00:00
Chris Lattner
cb9a614ea4 Improve code generated for integer multiplications by 2,3,5,9
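
A minimal sketch of the idea, assuming the multiplies are mapped onto LEA's
scaled-index addressing (register choices and exact syntax illustrative):

        add %EAX, %EAX                          # %EAX * 2
        lea %EAX, DWORD PTR [%EAX + 2*%EAX]     # %EAX * 3
        lea %EAX, DWORD PTR [%EAX + 4*%EAX]     # %EAX * 5
        lea %EAX, DWORD PTR [%EAX + 8*%EAX]     # %EAX * 9
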
llvm-svn: 13342
2004-05-04 15:47:14 +00:00
Chris Lattner
4b5d4eb5b1 Remove unused #include
llvm-svn: 13304
2004-05-01 21:29:16 +00:00
Brian Gaeke
bfb4fe5109 Make RequiresFPRegKill() take a MachineBasicBlock arg.
In InsertFPRegKills(), just check the MachineBasicBlock for successors
instead of its corresponding BasicBlock.

llvm-svn: 13213
2004-04-28 04:45:55 +00:00
Brian Gaeke
74ed24c9de In InsertFPRegKills(), use the machine-CFG itself rather than the
LLVM CFG when trying to find the successors of BB.

llvm-svn: 13212
2004-04-28 04:34:16 +00:00
Brian Gaeke
6c03805717 Update the machine-CFG edges whenever we see a branch.
llvm-svn: 13211
2004-04-28 04:19:37 +00:00
John Criswell
8a4525ae64 Remove code to adjust the iterator for llvm.readio and llvm.writeio.
The iterator is pointing at the next instruction which should not disappear
when doing the load/store replacement.

llvm-svn: 12954
2004-04-14 21:27:56 +00:00
John Criswell
94de925685 Added support for the llvm.readio and llvm.writeio intrinsics.
On x86, memory operations occur in-order, so these are just lowered into
volatile loads and stores.
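
An illustrative sketch of the lowering, in the IR syntax of the time (names
hypothetical):

        %V = volatile load int* %P              ; lowered llvm.readio
        volatile store int %V2, int* %P         ; lowered llvm.writeio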

llvm-svn: 12936
2004-04-13 22:13:14 +00:00
Chris Lattner
2ba048528f Implement a small optimization, which papers over the problem in
X86/2004-04-13-FPCMOV-Crash.llx

A more robust fix is to follow.

llvm-svn: 12935
2004-04-13 21:56:09 +00:00
Chris Lattner
8b6bc380e3 Emit the immediate form of in/out when possible.
Fix several bugs in the intrinsics:
  1. Make sure to copy the input registers before the instructions that use them
  2. Make sure to copy the value returned by 'in' out of EAX into the register
     it is supposed to be in.

This fixes assertions when using in/out and linear scan.

llvm-svn: 12896
2004-04-13 17:20:37 +00:00
Chris Lattner
43f754339a Fix issues that the local allocator has dealing with instructions that implicitly use ST(0)
llvm-svn: 12855
2004-04-12 03:02:48 +00:00
Chris Lattner
682a6361c7 Use the fucomi[p] instructions to perform floating point comparisons instead
of the fucom[p][p] instructions.  This allows us to code generate this function

bool %test(double %X, double %Y) {
        %C = setlt double %Y, %X
        ret bool %C
}

... into:

test:
        fld QWORD PTR [%ESP + 4]
        fld QWORD PTR [%ESP + 12]
        fucomip %ST(1)
        fstp %ST(0)
        setb %AL
        movsx %EAX, %AL
        ret

where before we generated:

test:
        fld QWORD PTR [%ESP + 4]
        fld QWORD PTR [%ESP + 12]
        fucompp
**      fnstsw
**      sahf
        setb %AL
        movsx %EAX, %AL
        ret

The two marked instructions (which are the ones eliminated) are very bad,
because they serialize execution of the processor.  These instructions are
available on the PPRO and later, but since we already use cmov's we aren't
losing any portability.

I retained the old code for the day when we decide we want to support back
to the 386.

llvm-svn: 12852
2004-04-12 01:43:36 +00:00
Chris Lattner
de47ad3d6f Fix a bug in my load/cast folding patch.
llvm-svn: 12849
2004-04-12 00:23:04 +00:00
Chris Lattner
b3a10e244a Adjust some comments, fix a bug in my previous patch
llvm-svn: 12848
2004-04-12 00:12:04 +00:00
Chris Lattner
24f8b11206 On X86, casting an integer to floating point requires going through memory.
If the source of the cast is a load, we can just use the source memory location,
without having to create a temporary stack slot entry.

Before we code generated this:

double %int(int* %P) {
        %V = load int* %P
        %V2 = cast int %V to double
        ret double %V2
}

into:

int:
        sub %ESP, 4
        mov %EAX, DWORD PTR [%ESP + 8]
        mov %EAX, DWORD PTR [%EAX]
        mov DWORD PTR [%ESP], %EAX
        fild DWORD PTR [%ESP]
        add %ESP, 4
        ret

Now we produce this:

int:
        mov %EAX, DWORD PTR [%ESP + 4]
        fild DWORD PTR [%EAX]
        ret

... which is nicer.

llvm-svn: 12846
2004-04-11 23:21:26 +00:00
Chris Lattner
95cf3f8765 Implement folding of loads into floating point operations. This implements:
test/Regression/CodeGen/X86/fp_load_fold.llx
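
A hedged sketch of the kind of folding this enables (registers assumed, not
the testcase itself):

        fld QWORD PTR [%ESP + 4]
        fadd QWORD PTR [%EAX]

instead of:

        fld QWORD PTR [%ESP + 4]
        fld QWORD PTR [%EAX]
        faddp %ST(1)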

llvm-svn: 12844
2004-04-11 22:05:45 +00:00
Chris Lattner
b611f10e74 Unify all of the code for floating point +,-,*,/ into one function
llvm-svn: 12842
2004-04-11 21:23:56 +00:00
Chris Lattner
3378d71a55 This implements folding of constant operands into floating point operations
for mul and div.

Instead of generating this:

test_divr:
        fld QWORD PTR [%ESP + 4]
        fld QWORD PTR [.CPItest_divr_0]
        fdivrp %ST(1)
        ret

We now generate this:

test_divr:
        fld QWORD PTR [%ESP + 4]
        fdivr QWORD PTR [.CPItest_divr_0]
        ret

This code desperately needs refactoring, which will come in the next
patch.

llvm-svn: 12841
2004-04-11 21:09:14 +00:00
Chris Lattner
833d84f48a Restructure the mul/div/rem handling code to follow the pattern the other
instructions use.  This doesn't change any functionality except that long
constant expressions of these operations will now magically start working.

llvm-svn: 12840
2004-04-11 20:56:28 +00:00
Chris Lattner
69304a897c Codegen FP adds and subtracts with a constant more efficiently, generating:
        fld QWORD PTR [%ESP + 4]
        fadd QWORD PTR [.CPItest_add_0]

instead of:

        fld QWORD PTR [%ESP + 4]
        fld QWORD PTR [.CPItest_add_0]
        faddp %ST(1)

I also intend to do this for mul & div, but it appears that I have to
refactor a bit of code before I can do so.

This is tested by: test/Regression/CodeGen/X86/fp_constant_op.llx

llvm-svn: 12839
2004-04-11 20:26:20 +00:00
Chris Lattner
d22a1894a0 Two changes:
  1. If an incoming argument is dead, don't load it from the stack
  2. Do not code gen no-op copies at all (i.e., cast int -> uint), not even as
     a move (sketched below).  This should reduce register pressure for
     allocators that are unable to coalesce away these copies in some cases.
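
A sketch of the second point (values hypothetical):

        %Y = cast int %X to uint        ; a no-op copy: no mov is emitted for it anymore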

llvm-svn: 12835
2004-04-11 19:21:59 +00:00
Chris Lattner
8b1122d4dc Silence a spurious warning
llvm-svn: 12815
2004-04-10 18:32:01 +00:00
John Criswell
c9c191c41b Reversed the order of the llvm.writeport() operands so that the value
is listed first and the address is listed second.
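
For illustration only (operand types assumed, not taken from the checkin), a
call now reads:

        call void %llvm.writeport(ubyte %value, ushort %port)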

llvm-svn: 12795
2004-04-09 19:09:14 +00:00
John Criswell
a52a2291d8 Changed assertions to error messages.
llvm-svn: 12787
2004-04-09 15:10:15 +00:00
John Criswell
8740c3767d Changes recommended by Chris:
InstSelectSimple.cpp:
  Change the checks for proper I/O port address size into an exit() instead
  of an assertion.  Assertions aren't used in Release builds, and handling
  this error should be graceful (not that this counts as graceful, but it's
  more graceful).

  Modified the generation of the IN/OUT instructions to have 0 arguments.
X86InstrInfo.td:
  Added the OpSize attribute to the 16 bit IN and OUT instructions.

llvm-svn: 12786
2004-04-08 22:39:13 +00:00
John Criswell
f6b16ea70b Added the llvm.readport and llvm.writeport intrinsics for x86.  These are
lowered to I/O port instructions on x86.  The specific code sequence is tailored to
the parameters and return value of the intrinsic call.
Added the ability for implicit definitions to be printed in the Instruction
Printer.
Added the ability for RawFrm instructions to print implicit uses and
definitions with correct comma output.  This required adjusting some
methods so that a leading comma would or would not be printed.

llvm-svn: 12782
2004-04-08 20:31:47 +00:00
Chris Lattner
3808778190 Fix PR313: [x86] JIT miscompiles unsigned short to floating point
llvm-svn: 12711
2004-04-06 19:29:36 +00:00
Chris Lattner
54e93df11a Fix a minor bug in the previous checkin
Enable folding of long seteq/setne comparisons into branches and select instructions
Implement unfolded long relational comparisons against constants a bit more efficiently

Folding comparisons changes code that looks like this:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EDX, DWORD PTR [%ESP + 8]
        mov %ECX, %EAX
        or %ECX, %EDX
        sete %CL
        test %CL, %CL
        je .LBB2 # PC rel: F

into code that looks like this:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EDX, DWORD PTR [%ESP + 8]
        mov %ECX, %EAX
        or %ECX, %EDX
        jne .LBB2 # PC rel: F

This speeds up 186.crafty by 6% with llc-ls.

llvm-svn: 12702
2004-04-06 17:34:50 +00:00
Chris Lattner
2d9b28ac0b Improve codegen of long == and != comparisons against constants. Before,
comparing a long against zero got us this:

        sub %ESP, 8
        mov DWORD PTR [%ESP + 4], %ESI
        mov DWORD PTR [%ESP], %EDI
        mov %EAX, DWORD PTR [%ESP + 12]
        mov %EDX, DWORD PTR [%ESP + 16]
        mov %ECX, 0
        mov %ESI, 0
        mov %EDI, %EAX
        xor %EDI, %ECX
        mov %ECX, %EDX
        xor %ECX, %ESI
        or %EDI, %ECX
        sete %CL
        test %CL, %CL
        je .LBB2 # PC rel: F

Now it gets us this:

        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EDX, DWORD PTR [%ESP + 8]
        mov %ECX, %EAX
        or %ECX, %EDX
        sete %CL
        test %CL, %CL
        je .LBB2 # PC rel: F

llvm-svn: 12696
2004-04-06 16:02:27 +00:00
Chris Lattner
fd7b570dff Handle various other important cases of multiplying a long by a constant immediate. For
example, multiplying X*(1 + (1LL << 32)) now produces:

test:
        mov %ECX, DWORD PTR [%ESP + 4]
        mov %EDX, DWORD PTR [%ESP + 8]
        mov %EAX, %ECX
        add %EDX, %ECX
        ret

[[[Note to Alkis: why isn't linear scan generating this code??  This might be a
 problem with your intervals being too conservative:

test:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EDX, DWORD PTR [%ESP + 8]
        add %EDX, %EAX
        ret

end note]]]

Whereas GCC produces this:

T:
        sub     %esp, 12
        mov     %edx, DWORD PTR [%esp+16]
        mov     DWORD PTR [%esp+8], %edi
        mov     %ecx, DWORD PTR [%esp+20]
        xor     %edi, %edi
        mov     DWORD PTR [%esp], %ebx
        mov     %ebx, %edi
        mov     %eax, %edx
        mov     DWORD PTR [%esp+4], %esi
        add     %ebx, %edx
        mov     %edi, DWORD PTR [%esp+8]
        lea     %edx, [%ecx+%ebx]
        mov     %esi, DWORD PTR [%esp+4]
        mov     %ebx, DWORD PTR [%esp]
        add     %esp, 12
        ret

I'm not sure exactly what GCC is smoking here, but it looks like it has just
confused itself with a bunch of stack slots or something.  The Intel compiler
is better, but still not good:

T:
        movl      4(%esp), %edx                                 #2.11
        movl      8(%esp), %eax                                 #2.11
        lea       (%eax,%edx), %ecx                             #3.12
        movl      $1, %eax                                      #3.12
        mull      %edx                                          #3.12
        addl      %ecx, %edx                                    #3.12
        ret                                                     #3.12

llvm-svn: 12693
2004-04-06 04:55:43 +00:00
Chris Lattner
6038e5a4a1 Efficiently handle a long multiplication by a constant. For this testcase:
long %test(long %X) {
        %Y = mul long %X, 123
        ret long %Y
}

we used to generate:

test:
        sub %ESP, 12
        mov DWORD PTR [%ESP + 8], %ESI
        mov DWORD PTR [%ESP + 4], %EDI
        mov DWORD PTR [%ESP], %EBX
        mov %ECX, DWORD PTR [%ESP + 16]
        mov %ESI, DWORD PTR [%ESP + 20]
        mov %EDI, 123
        mov %EBX, 0
        mov %EAX, %ECX
        mul %EDI
        imul %ESI, %EDI
        add %ESI, %EDX
        imul %ECX, %EBX
        add %ESI, %ECX
        mov %EDX, %ESI
        mov %EBX, DWORD PTR [%ESP]
        mov %EDI, DWORD PTR [%ESP + 4]
        mov %ESI, DWORD PTR [%ESP + 8]
        add %ESP, 12
        ret

Now we emit:
test:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %ECX, DWORD PTR [%ESP + 8]
        mov %EDX, 123
        mul %EDX
        imul %ECX, %ECX, 123
        add %ECX, %EDX
        mov %EDX, %ECX
        ret

Which, incidentally, is substantially nicer than what GCC manages:
T:
        sub     %esp, 8
        mov     %eax, 123
        mov     DWORD PTR [%esp], %ebx
        mov     %ebx, DWORD PTR [%esp+16]
        mov     DWORD PTR [%esp+4], %esi
        mov     %esi, DWORD PTR [%esp+12]
        imul    %ecx, %ebx, 123
        mov     %ebx, DWORD PTR [%esp]
        mul     %esi
        mov     %esi, DWORD PTR [%esp+4]
        add     %esp, 8
        lea     %edx, [%ecx+%edx]
        ret

llvm-svn: 12692
2004-04-06 04:29:36 +00:00