Commit Graph

2793 Commits

Author SHA1 Message Date
Chris Lattner
f3d950e816 Add support for truncstore and *extload.
llvm-svn: 19566
2005-01-15 05:22:24 +00:00
Chris Lattner
27c91fac94 Adjust to CopyFromReg changes.
llvm-svn: 19561
2005-01-14 22:37:41 +00:00
Chris Lattner
c032990335 Fix Regression/CodeGen/PowerPC/2005-01-14-UndefLong.ll
llvm-svn: 19557
2005-01-14 20:22:02 +00:00
Chris Lattner
b0b49268c4 Fix: Regression/CodeGen/PowerPC/2005-01-14-SetSelectCrash.ll
llvm-svn: 19555
2005-01-14 19:31:00 +00:00
Chris Lattner
7a8788c9ac Add new ImplicitDef node, rename CopyRegSDNode class to RegSDNode.
llvm-svn: 19535
2005-01-13 20:50:02 +00:00
Chris Lattner
fce6a5439d Codegen factor nodes more intelligently according to perceived register pressure.
llvm-svn: 19532
2005-01-13 19:56:00 +00:00
Chris Lattner
cb4359465a Initial trivial (but stupid) codegen for this node.
llvm-svn: 19529
2005-01-13 18:01:36 +00:00
Chris Lattner
9a70166615 Add some really pedantic assertions to the load folding code. Fix a bunch
of cases where we accidentally emitted a load folded once and unfolded
elsewhere.

llvm-svn: 19522
2005-01-13 05:53:16 +00:00
Chris Lattner
2ab70aafe0 We can only fold a load into an op if there is exactly one use of the value.
Checking to see if the load has two uses is not equivalent, as the chain
value may have zero uses.

llvm-svn: 19518
2005-01-12 18:38:26 +00:00
Chris Lattner
4b03f0f99e Try both ways to fold an add together. This allows us to generate this code
        imul %EAX, %EAX, 400
        add %ECX, %EAX
        add %ESI, DWORD PTR [%ECX + 4*%EDX]
        inc %EDX
        cmp %EDX, 100

instead of this:

        imul %EAX, %EAX, 400
        add %ECX, %EAX
        mov %EAX, %EDX
        shl %EAX, 2
        add %ECX, %EAX
        add %ESI, DWORD PTR [%ECX]
        inc %EDX
        cmp %EDX, 100

llvm-svn: 19513
2005-01-12 18:08:53 +00:00
Chris Lattner
61c572eb7f Fix a major miscompilation where we were overwriting the scale reg.
llvm-svn: 19511
2005-01-12 07:33:20 +00:00
Chris Lattner
5816f1a302 Do not use the type of the RHS constant to determine the type of the operation.
This fails for shifts because the constant is always 8 bits.

llvm-svn: 19508
2005-01-12 05:22:07 +00:00
Chris Lattner
89d6b21ae6 Do not lose the offset from the global when peephole optimizing instructions.
This fixes FreeBench/pcompress

llvm-svn: 19507
2005-01-12 05:17:28 +00:00
Jeff Cohen
614a5ec22a Fix more C++ compilation errors
llvm-svn: 19504
2005-01-12 04:29:05 +00:00
Chris Lattner
5ef92f3a40 Fix a compile error with VC++, which thinks that static const arrays need
to be dynamically initialized. :(

llvm-svn: 19503
2005-01-12 04:23:22 +00:00
Chris Lattner
627c64e5e5 Fix a bug that caused us to crash on povray. We weren't emitting an FP_REG_KILL into a block that had a successor with an FP PHI node.
llvm-svn: 19502
2005-01-12 04:21:28 +00:00
Chris Lattner
a5f0ba59a0 Print a load of a null pointer (in intel mode) like this:
        mov %AX, WORD PTR [0]

instead of like this:

        mov %AX, WORD PTR []

llvm-svn: 19501
2005-01-12 04:07:11 +00:00
Chris Lattner
360988bae2 Print a load of a null pointer like this:
        movw 0, %ax

instead of like this:

        movw , %ax

llvm-svn: 19500
2005-01-12 04:05:19 +00:00
Chris Lattner
3c85c67c97 Fix a crash compiling povray on UINT_TO_FP from i16.
llvm-svn: 19499
2005-01-12 04:00:00 +00:00
Chris Lattner
4e72a2a000 There are no [mem] op= reg instructions for FP, so remove their entries.
llvm-svn: 19496
2005-01-12 03:16:09 +00:00
Chris Lattner
00cb0ace9b Fix a bug where we didn't insert FP_REG_KILL instructions into MBB's that
contain FP PHI nodes but no other FP defining instructions.  This fixes
183.equake

llvm-svn: 19495
2005-01-12 02:57:10 +00:00
Chris Lattner
92166ed1df Fold TRUNCATE (LOAD P) into a smaller load from P.
llvm-svn: 19494
2005-01-12 02:19:06 +00:00
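A minimal C sketch of the pattern this fold targets (illustrative, not taken from the commit): truncating a 32-bit load to 16 bits becomes a single 16-bit load, since on little-endian x86 the low half lives at the same address.

  short low_half(int *p) {
    /* before: 32-bit load, then truncate; after: one 16-bit load of the low half */
    return (short)*p;
  }
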
Chris Lattner
258b23bd9d Be more careful about order of arg evaluation for CopyToReg nodes. This shrinks
256.bzip2 from 7142 to 7103 lines of .s file.

Second, add initial support for folding loads into compares, though this code
is dynamically dead for now. :(

llvm-svn: 19493
2005-01-12 02:02:48 +00:00
Chris Lattner
604416e8f4 Fold some more [mem] op= val operators. This allows us to do things like this
several times in 256.bzip2:

        mov %EAX, DWORD PTR [%ESP + 204]
-       mov %EAX, DWORD PTR [%EAX]
-       or %EAX, 2097152
-       mov %ECX, DWORD PTR [%ESP + 204]
-       mov DWORD PTR [%ECX], %EAX
+       or DWORD PTR [%EAX], 2097152

llvm-svn: 19492
2005-01-12 01:28:00 +00:00
Chris Lattner
e83ae1063f Fold loads into sign/zero extends. Instead of:
  mov %AL, BYTE PTR [%EDX + l18_length_code]
  movzx %EAX, %AL

Emit:

  movzx %EAX, BYTE PTR [%EDX + l18_length_code]

llvm-svn: 19489
2005-01-11 23:33:00 +00:00
Chris Lattner
87a38bd4a8 Comment out debug code :)
Select [mem] += Val operations.  For constants, we used to get:

  mov %ECX, -32768
  add %ECX, DWORD PTR [l4_match_start]
  mov DWORD PTR [l4_match_start], %ECX

Now we get:

  add DWORD PTR [l4_match_start], -32768

For other values we used to get:

  mov %EBP, %EDI   ;; because the add destroys the value
  add %EBP, DWORD PTR [l4_input_len]
  mov DWORD PTR [l4_input_len], %EBP

now we get:

  add DWORD PTR [l4_input_len], %EDI

Both of these use fewer registers than the alternative, and are faster and smaller.

llvm-svn: 19488
2005-01-11 23:21:30 +00:00
Chris Lattner
282473a25d Handle the global address case here, not just the offset case.
llvm-svn: 19487
2005-01-11 22:58:43 +00:00
Chris Lattner
9eb2cc700b Treat int constants as not requiring a register, since they are almost always
folded into an instruction.

llvm-svn: 19486
2005-01-11 22:29:12 +00:00
Chris Lattner
7cb2220907 * Factor a bunch of binary operator cases into shared code.
* Fold loads into Add, sub, and, or, xor and mul when possible.
* Codegen shl X, 1 as add X, X

llvm-svn: 19483
2005-01-11 21:19:59 +00:00
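A hedged C illustration of the last two bullets (the function and register names are made up): the load feeding the add folds into the add itself, and a shift-left by one is selected as an add of the value to itself.

  int add_mem(int x, int *p) {
    return x + *p;   /* load folds into the op: add %EAX, DWORD PTR [%ECX] */
  }
  int dbl(int x) {
    return x << 1;   /* selected as add %EAX, %EAX rather than shl %EAX, 1 */
  }
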
Chris Lattner
b1a72cb39a Clear the whole array, always.
llvm-svn: 19482
2005-01-11 20:25:26 +00:00
Chris Lattner
b838c9748e Fold multiplies by 3,5,9 into addressing modes when possible.
llvm-svn: 19480
2005-01-11 19:37:02 +00:00
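Multiplies by 3, 5, and 9 fit the x86 address form base + scale*index with scales 2, 4, and 8, so each can be selected as a single LEA instead of an IMUL. A small C sketch (illustrative, not from the commit):

  unsigned times3(unsigned x) { return x * 3; }   /* x + 2*x  ->  one LEA */
  unsigned times5(unsigned x) { return x * 5; }   /* x + 4*x  ->  one LEA */
  unsigned times9(unsigned x) { return x * 9; }   /* x + 8*x  ->  one LEA */
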
Chris Lattner
e7b1130b01 Instead of generating stuff like this:
        mov %ECX, %EAX
        add %ECX, 32768
        mov %SI, WORD PTR [2*%ECX + l13_prev]

Generate this:

        mov %SI, WORD PTR [2*%ECX + l13_prev + 65536]

This occurs when you have a GEP instruction where an index is
"something + imm".

llvm-svn: 19472
2005-01-11 06:36:20 +00:00
Chris Lattner
bb63a09cd1 Implement MEMCPY natively in terms of rep movs*
llvm-svn: 19468
2005-01-11 06:19:26 +00:00
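Conceptually, a memcpy of known size and alignment can be lowered to the x86 string-move instructions instead of a library call: rep movsd copies ECX doublewords from [ESI] to [EDI]. A hedged C example of a call that can be lowered this way (the size and names are illustrative):

  #include <string.h>

  void copy_block(int *dst, const int *src) {
    /* roughly: mov ESI, src; mov EDI, dst; mov ECX, 64; rep movsd */
    memcpy(dst, src, 64 * sizeof(int));
  }
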
Chris Lattner
b2b08a8bc1 Implement memset -> rep stos*
llvm-svn: 19467
2005-01-11 06:14:36 +00:00
Chris Lattner
58816a9e81 Announce that we don't support mem ops yet.
llvm-svn: 19466
2005-01-11 05:57:36 +00:00
Chris Lattner
f867443d7e Teach the address selector to make 'reg+reg' addressing modes.
llvm-svn: 19457
2005-01-11 04:40:19 +00:00
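A 'reg+reg' addressing mode lets a register index fold straight into the memory operand. A minimal C sketch (illustrative only; the registers in the comment are assumptions):

  char byte_at(char *base, int i) {
    /* address selected as one operand, e.g. mov %AL, BYTE PTR [%ECX + %EDX],
       instead of a separate add followed by a load */
    return base[i];
  }
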
Chris Lattner
edf06be50e Emit NOT instructions.
llvm-svn: 19455
2005-01-11 04:31:30 +00:00
Chris Lattner
4e4bef2d6c Fix a bug emitting branches that broke a lot of programs.
llvm-svn: 19452
2005-01-11 04:06:27 +00:00
Chris Lattner
4b51297a94 Be more careful where we set ContainsFPCode. We were missing a set in the
int -> FP casting code.  Note that we don't have to set it for FP operations
that take FP values as operands: whatever produces the FP value will set the
flag.

llvm-svn: 19451
2005-01-11 03:50:45 +00:00
Chris Lattner
0c4c4094e3 Fix a major bug in setcc/cmov folding, where we accidentally
inverted the sense of the comparison.

llvm-svn: 19450
2005-01-11 03:37:59 +00:00
Chris Lattner
d188e03011 Take register pressure into account when we have to decide whether to
evaluate the LHS or the RHS of an operation first.  This causes good things
to happen.  For example, instead of compiling a loop to this:

.LBBstrength_result7_1: # loopentry
        movl 16(%esp), %edi
        movl (%edi), %edi             ;;; LOAD
        movl (%ecx), %ebx
        movl $2, (%eax,%ebx,4)
        movl (%edx), %ebx
        movl %esi, %ebp
        addl $21, %ebp
        addl $42, %esi
        cmpl $0, %edi                 ;;; USE
        cmovne %esi, %ebp
        cmpl %ebp, %ebx
        movl %ebp, %esi
        jg .LBBstrength_result7_1

We now compile it to this:

.LBBstrength_result7_1: # loopentry
        movl %edi, %ebx
        addl $42, %ebx
        addl $21, %edi
        movl (%ecx), %ebp              ;; LOAD
        cmpl $0, %ebp                  ;; USE
        cmovne %ebx, %edi
        movl (%edx), %ebx
        movl $2, (%eax,%ebx,4)
        movl (%esi), %ebx
        cmpl %edi, %ebx
        jg .LBBstrength_result7_1

Which reduces register pressure enough (in this case) to avoid spilling in the
loop.

As another example, consider the CodeGen/X86/regpressure.ll testcase.  We
used to generate this code for both cases:

regpressure1:
        subl $32, %esp
        movl %esi, 12(%esp)
        movl %edi, 8(%esp)
        movl %ebx, 4(%esp)
        movl %ebp, (%esp)
        movl 36(%esp), %ecx
        movl (%ecx), %eax
        movl 4(%ecx), %edx
        movl %edx, 24(%esp)
        movl 8(%ecx), %edx
        movl %edx, 16(%esp)
        movl 12(%ecx), %edx
        movl 16(%ecx), %esi
        movl 20(%ecx), %edi
        movl 24(%ecx), %ebx
        movl %ebx, 28(%esp)
        movl 28(%ecx), %ebx
        movl 32(%ecx), %ebp
        movl %ebp, 20(%esp)
        movl 36(%ecx), %ecx
        imull 24(%esp), %eax
        imull 16(%esp), %eax
        imull %edx, %eax
        imull %esi, %eax
        imull %edi, %eax
        imull 28(%esp), %eax
        imull %ebx, %eax
        imull 20(%esp), %eax
        imull %ecx, %eax
        movl (%esp), %ebp
        movl 4(%esp), %ebx
        movl 8(%esp), %edi
        movl 12(%esp), %esi
        addl $32, %esp
        ret

This code is basically trying to do all of the loads first, then execute all
of the multiplies.  Because we run out of registers, lots of spill code happens.
We now generate this code for both cases:

regpressure1:
        movl 4(%esp), %ecx
        movl (%ecx), %eax
        movl 4(%ecx), %edx
        imull %edx, %eax
        movl 8(%ecx), %edx
        imull %edx, %eax
        movl 12(%ecx), %edx
        imull %edx, %eax
        movl 16(%ecx), %edx
        imull %edx, %eax
        movl 20(%ecx), %edx
        imull %edx, %eax
        movl 24(%ecx), %edx
        imull %edx, %eax
        movl 28(%ecx), %edx
        imull %edx, %eax
        movl 32(%ecx), %edx
        imull %edx, %eax
        movl 36(%ecx), %ecx
        imull %ecx, %eax
        ret

which is much nicer (when we fold loads into the muls it will be even better).
The old instruction selector used to produce the good code for regpressure1
but not for regpressure2, as it depended on the order of operations in the
LLVM code.

llvm-svn: 19449
2005-01-11 03:11:44 +00:00
Chris Lattner
497e24c885 Fold setcc instructions into selects.
llvm-svn: 19438
2005-01-10 22:10:13 +00:00
Chris Lattner
65d007ab62 Add conditional moves for the parity flag.
llvm-svn: 19437
2005-01-10 22:09:33 +00:00
Chris Lattner
d61491dea2 Implement 8-bit multiply for X86.
llvm-svn: 19435
2005-01-10 20:55:48 +00:00
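For reference, the x86 8-bit multiply takes one operand implicitly in AL and leaves the 16-bit product in AX (e.g. 'mul %CL'). A hedged C example of a byte multiply that exercises this path (the function name is made up):

  unsigned char mul8(unsigned char a, unsigned char b) {
    return (unsigned char)(a * b);   /* product formed in AX; the low 8 bits are returned */
  }
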
Chris Lattner
fcab5f75c0 Codegen (Reg|imm)+&GV as an LEA, because we cannot put it into the immediate field
of an ADDri (due to current restrictions on MachineOperand :( ).  This allows
us to generate:

        leal Data+16000, %edx

instead of:

        movl $Data, %edx
        addl $16000, %edx

llvm-svn: 19420
2005-01-09 20:20:29 +00:00
Chris Lattner
35375c11bf Fix copy-and-paste errors for FP -> Int. This fixes fldry.
llvm-svn: 19418
2005-01-09 19:49:59 +00:00
Chris Lattner
45155a3dee Initial implementation of FP->INT and INT->FP casts
Also, fix zero_extend from bool to i8, which fixes Shootout/objinst.

llvm-svn: 19414
2005-01-09 18:52:44 +00:00
Chris Lattner
9ca9b20447 Fix a subtle bug involving constant expr casts from int to fp
llvm-svn: 19410
2005-01-09 01:49:29 +00:00
Chris Lattner
c5e53c07fd Implement varargs and returnaddress/frameaddress intrinsics. With this
patch, all of SingleSource/UnitTests passes.

llvm-svn: 19408
2005-01-09 00:01:27 +00:00
Chris Lattner
ca81756527 Okay, the 15th time is the charm. Looking at the vector size is useless, as it
gets clobbered by a previous statement.  This fixes all calls finally.

llvm-svn: 19399
2005-01-08 20:51:36 +00:00
Chris Lattner
85816cff9a Okay, my off by one was actually off by two. This fixes Generic/2003-07-07-BadLongConst.ll
llvm-svn: 19398
2005-01-08 20:39:31 +00:00
Chris Lattner
2d68cb6cf4 Fix off by one error
llvm-svn: 19396
2005-01-08 20:31:34 +00:00
Chris Lattner
c4d075cfa3 Adjust to changes in LowerCallTo interface
Minor bugfixes

llvm-svn: 19376
2005-01-08 19:28:19 +00:00
Chris Lattner
6c7d3bd8ea Wrap long line.
llvm-svn: 19367
2005-01-08 06:59:50 +00:00
Chris Lattner
473ec492f7 The X86 instruction selector already handles codegen of:
store float 123.45, float* %P

as an integer store.  This adds handling of float immediate stores as integers
for arguments passed to function calls.

This is now tested by CodeGen/X86/store-fp-constant.ll

llvm-svn: 19364
2005-01-08 05:45:24 +00:00
Chris Lattner
2c398fc8f6 Allow the selection-dag based selector to be disabled with -disable-pattern-isel.
For now, this is the default, as the current selector is missing some big pieces.
To enable the new selector, pass -disable-pattern-isel=false to llc or lli.

llvm-svn: 19335
2005-01-07 07:50:50 +00:00
Chris Lattner
216198574d Reimplementation of the X86 pattern isel. This is still missing many large
pieces, but can already do amazing things in some cases.

llvm-svn: 19334
2005-01-07 07:49:41 +00:00
Chris Lattner
74019f517a This file is now dead.
llvm-svn: 19333
2005-01-07 07:49:05 +00:00
Chris Lattner
079b497982 Add a new prototype
llvm-svn: 19332
2005-01-07 07:48:33 +00:00
Chris Lattner
fb848e6fad First draft of new Target interface
llvm-svn: 19324
2005-01-07 07:44:53 +00:00
Chris Lattner
608dd77d6b Codegen -1 and -0.0 more efficiently. This implements CodeGen/X86/negatize_zero.ll
llvm-svn: 19313
2005-01-06 21:19:16 +00:00
Jeff Cohen
146e5504e5 Fix CBE code so that it compiles with VC++.
llvm-svn: 19303
2005-01-06 04:21:49 +00:00
Chris Lattner
6d651234d6 1. If a double FP constant must be put into a constant pool, but it can be
precisely represented as a float, put it into the constant pool as a
   float.
2. Use the cbw/cwd/cdq instructions instead of an explicit SAR for signed
   division.

llvm-svn: 19291
2005-01-05 16:30:14 +00:00
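Two hedged C examples of the points above (values and names are illustrative): 1.25 survives the round trip through float, so its constant-pool entry can be a 4-byte float; and a signed divide needs the dividend sign-extended into EDX:EAX, which cdq does in one instruction instead of 'mov edx, eax' plus 'sar edx, 31'.

  double scale(double x) {
    return x * 1.25;   /* 1.25 == (double)(float)1.25, so pool it as a float */
  }
  int sdiv(int a, int b) {
    return a / b;      /* cdq sign-extends EAX into EDX before the idiv */
  }
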
Chris Lattner
b438f5251f Minor optimization to allocate R8 registers in a better order.
llvm-svn: 19289
2005-01-05 16:09:16 +00:00
Jeff Cohen
36968ed8c1 Revert elimination of global variable hack... still needed.
llvm-svn: 19273
2005-01-03 16:34:19 +00:00
Chris Lattner
1aaf8cccb2 ADC and IMUL are also commutable.
llvm-svn: 19264
2005-01-03 01:27:59 +00:00
Chris Lattner
93fc4bd9cb This hunk:
-  unsigned TrueValue = getReg(TrueVal, BB, BB->begin());
+  unsigned TrueValue = getReg(TrueVal);

Fixes the PPC regressions from last night.

The other hunk is just a clarity improvement.

llvm-svn: 19263
2005-01-02 23:07:31 +00:00
Jeff Cohen
1087b72875 Eliminate the use of the global variable hack in the X86 target that was used
to get Visual Studio to link in X86.lib to the executables that need it.  There
is another way of doing it.

llvm-svn: 19252
2005-01-02 04:23:12 +00:00
Chris Lattner
a78fd4726e Disable 2->3 address promotion of add and inc instructions to LEA's. In
addition to being three address, LEA's don't set the flags.

This fixes 186.crafty.

llvm-svn: 19251
2005-01-02 04:18:17 +00:00
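The flags point, sketched in C (illustrative; how the loop is actually selected is an assumption): add and inc update EFLAGS, so a following conditional branch can test the result for free, while an LEA computes the same sum but leaves the flags untouched.

  int f(int x, int y) {
    x += y;        /* add %EAX, %ECX sets ZF ...                    */
    if (x == 0)    /* ... so this test needs no separate cmp;       */
      return 1;    /* lea %EAX, [%EAX + %ECX] would leave ZF stale  */
    return x;
  }
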
Chris Lattner
3ef32da6c3 Add a new method.
llvm-svn: 19249
2005-01-02 02:38:18 +00:00
Chris Lattner
95f1e628ed Add support for SETNPr to lower to memory form.
llvm-svn: 19248
2005-01-02 02:37:46 +00:00
Chris Lattner
d6bc921fa8 Implement the convertToThreeAddress method, add support for inverting JP/JNP
branches.

llvm-svn: 19247
2005-01-02 02:37:07 +00:00
Chris Lattner
0d6f03e52b Two changes here:
1. Add new instructions for checking parity flags: JP, JNP, SETP, SETNP.
2. Set the isCommutable and isPromotableTo3Address bits on several
   instructions.

llvm-svn: 19246
2005-01-02 02:35:46 +00:00
Chris Lattner
cc26e332b3 Add some bits that can be set for instructions.
llvm-svn: 19241
2005-01-02 02:27:48 +00:00
Chris Lattner
ad63a0d6a4 Fix a FIXME: Select instructions on longs were miscompiled.
While we're at it, improve codegen of select instructions.  For this
testcase:

int %test(bool %C, int %A, int %B) {
  %D = select bool %C, int %A, int %B
  ret int %D
}

We used to generate this code:

_test:
        cmpwi cr0, r3, 0
        bne .LBB_test_2 ;
.LBB_test_1:    ;
        b .LBB_test_3   ;
.LBB_test_2:    ;
        or r5, r4, r4
.LBB_test_3:    ;
        or r3, r5, r5
        blr

Now we emit:

_test:
        cmpwi cr0, r3, 0
        bne .LBB_test_2 ;
.LBB_test_1:    ;
        or r4, r5, r5
.LBB_test_2:    ;
        or r3, r4, r4
        blr

-Chris

llvm-svn: 19214
2005-01-01 16:10:12 +00:00
Chris Lattner
ccd0d44133 Substantially improve the code generated by non-folded setcc instructions.
In particular, instead of compiling this:

bool %test(int %A, int %B) {
  %C = setlt int %A, %B
  ret bool %C
}

to this:

test:
        save %sp, -96, %sp
        subcc %i0, %i1, %g0
        bl .LBBtest_1   !
        nop
        ba .LBBtest_2   !
        nop
.LBBtest_1:     !
        or %g0, 1, %i0
        ba .LBBtest_3   !
        nop
.LBBtest_2:     !
        or %g0, 0, %i0
        ba .LBBtest_3   !
        nop
.LBBtest_3:     !
        restore %g0, %g0, %g0
        retl
        nop

We now compile it to this:

test:
        save %sp, -96, %sp
        subcc %i0, %i1, %g0
        or %g0, 1, %i0
        bl .LBBtest_2   !
        nop
.LBBtest_1:     !
        or %g0, %g0, %i0
.LBBtest_2:     !
        restore %g0, %g0, %g0
        retl
        nop

llvm-svn: 19213
2005-01-01 16:06:57 +00:00
Chris Lattner
37b9eef884 Fix PR490
Fix testcase CodeGen/CBackend/2004-12-28-LogicalConstantExprs.ll

llvm-svn: 19176
2004-12-29 04:00:09 +00:00
Chris Lattner
3b78513843 Remove unused enum value
llvm-svn: 19024
2004-12-17 22:41:46 +00:00
Chris Lattner
5f28e9fafc Remove unused #include
llvm-svn: 19021
2004-12-17 19:07:04 +00:00
Chris Lattner
d11ba51208 Change the sentinel
llvm-svn: 19007
2004-12-17 00:46:51 +00:00
Chris Lattner
59d0c02d2b Create a stack slot for the return address lazily instead of eagerly. This
saves small amounts of time for functions that don't call llvm.returnaddress
or llvm.frameaddress (which is almost all functions).

llvm-svn: 19006
2004-12-17 00:07:46 +00:00
Tanya Lattner
3b44c9a485 Chris is a pain ;) Removing reassociate.
llvm-svn: 19005
2004-12-16 23:16:16 +00:00
Tanya Lattner
93142a02ee Removing commented out lines.
llvm-svn: 19004
2004-12-16 23:13:16 +00:00
Tanya Lattner
c537a19bad Removed LICM and GCSE.
llvm-svn: 19003
2004-12-16 23:07:36 +00:00
Chris Lattner
50411edddf Remove dead #include
llvm-svn: 18994
2004-12-16 19:32:38 +00:00
Chris Lattner
4b1d58bf4b Adjust to changes in asmwriter filenames
llvm-svn: 18987
2004-12-16 17:33:24 +00:00
Chris Lattner
cea3ae9792 Specify all of the targets built.
llvm-svn: 18985
2004-12-16 17:26:44 +00:00
Chris Lattner
dc59826592 Use the rules in Makefile.rules to build SparcV9GenCodeEmitter.inc instead
of custom rules.

llvm-svn: 18984
2004-12-16 16:47:56 +00:00
Chris Lattner
d311c2587d Fix header
llvm-svn: 18983
2004-12-16 16:47:03 +00:00
Chris Lattner
a0561d43b2 Factor out common .td file chunks.
llvm-svn: 18982
2004-12-16 16:31:57 +00:00
Chris Lattner
cf5cd542d4 Fix PR485, instead of emitting zero sized arrays, emit arrays of size 1.
llvm-svn: 18974
2004-12-15 23:13:15 +00:00
Brian Gaeke
1b6a79c6d5 The mystery of Olden/tsp solved, and more opportunities for speedup.
llvm-svn: 18932
2004-12-14 09:10:10 +00:00
Brian Gaeke
83dcf14697 Get rid of shifts by zero in most cases.
llvm-svn: 18931
2004-12-14 08:21:02 +00:00
Chris Lattner
0e9a9d2098 When generating code for X86 targets, make sure the fp control word is set
to 64-bit precision, not 80 bits.

llvm-svn: 18915
2004-12-13 21:52:52 +00:00
Chris Lattner
3fe2a6aa6a Add some notes
llvm-svn: 18911
2004-12-13 20:13:10 +00:00
Chris Lattner
4136428410 Set the precision of the X86 FPU to 64 bits instead of 80 bits. We
don't support long double anyway, and this gives us FP results closer to
other targets.

This also speeds up 179.art from 41.4s to 18.32s, by eliminating a problem
with extra precision that causes an FP == comparison to fail (leading to
extra loop iterations).

llvm-svn: 18895
2004-12-13 17:23:11 +00:00
Brian Gaeke
aa9f3851f7 Add V8 SPEC status.
llvm-svn: 18844
2004-12-13 00:27:35 +00:00
Chris Lattner
9f0237ca85 Fix Regression/CodeGen/PowerPC/2004-12-12-ZeroSizeCommon.ll, and all programs
when compiled with debug information.

llvm-svn: 18835
2004-12-12 20:36:19 +00:00
Chris Lattner
dc33000e67 CSE calls to getTypeSize.
llvm-svn: 18833
2004-12-12 20:31:00 +00:00
Chris Lattner
6131b06f73 Use the target triple to pick this target.
llvm-svn: 18830
2004-12-12 17:40:28 +00:00
Brian Gaeke
757a3aa9b9 Complete the list of MultiSource failures.
llvm-svn: 18826
2004-12-12 08:22:11 +00:00
Brian Gaeke
a440424596 hbd should be working now.
llvm-svn: 18824
2004-12-12 07:42:59 +00:00
Brian Gaeke
ee60e35a28 Finally enable the setcc-branch folding code.
Also, fix a bug where ubyte 255 would sometimes be output as -1. This
was afflicting hbd.

llvm-svn: 18823
2004-12-12 07:42:58 +00:00
Brian Gaeke
09c4a78ece Add (currently disabled) code for canFoldSetCC
llvm-svn: 18820
2004-12-12 06:22:30 +00:00
Brian Gaeke
55c163e41e Add stubs for setcc-branch folding support.
llvm-svn: 18818
2004-12-12 06:01:26 +00:00
Brian Gaeke
5d213ad8c3 Print llvm code one function at a time.
llvm-svn: 18805
2004-12-11 22:17:07 +00:00
Brian Gaeke
80831ad19a JIT should print LLVM each function before selecting instructions for it.
llvm-svn: 18803
2004-12-11 18:41:09 +00:00
Brian Gaeke
220fb4f8cd Bools are *also* not ints. Sigh. Furthermore, most of the TargetMachine
ctor parameters can be defaulted.

Print the transformed llvm code input to the instruction selector
when -print-machineinstrs is on, just like V9.

llvm-svn: 18794
2004-12-11 05:19:04 +00:00
Brian Gaeke
24ea5d3dd7 Look for many more moves to fold (previously, we only recognized the
*-marked form as a move):

 *or  g0, x     add  g0, x
  or  x, g0     add  x, g0
  or  0, x      add  0, x
  or  x, 0      add  x, 0

llvm-svn: 18793
2004-12-11 05:19:03 +00:00
Brian Gaeke
948a8145bf Make GEPs not suck so much:
* Don't emit the Index * ElementSize multiply if Index is a constant.
* Use a shift, not a multiply, if ElementSize is 1/2/4/8.
* If ElementSize fits in the immediate field of SMUL, then put it there.

Fix a bug where struct offsets might be truncated (ConstantSInt::get is
now used instead of ConstantInt::get).

llvm-svn: 18792
2004-12-11 05:19:02 +00:00
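A short C sketch of the first two bullets (illustrative; %idx in the comments stands for whichever register holds the index): an 8-byte element index becomes a left shift by 3 instead of an SMUL, and a constant index needs no multiply at all because the byte offset folds at compile time.

  double elem(double *a, int i) {
    return a[i];   /* index*8 emitted as sll %idx, 3, ... rather than an SMUL */
  }
  double fourth(double *a) {
    return a[4];   /* constant byte offset 32; no multiply emitted */
  }
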
Brian Gaeke
e0643b792b Update lists of failing benchmarks, including info on which
ones are failing in cbe.

llvm-svn: 18791
2004-12-11 05:19:01 +00:00
Brian Gaeke
dc916ae40f Move -lowerselect later in the chain; some select instructions were
slipping through into the instruction selector, which can't deal with
them yet.

llvm-svn: 18758
2004-12-10 08:39:30 +00:00
Brian Gaeke
7ec3883e1a Add the rest of the multiply instructions.
llvm-svn: 18757
2004-12-10 08:39:29 +00:00
Brian Gaeke
2a9ecc433f Support binary operations with immediates for <= cInt.
llvm-svn: 18756
2004-12-10 08:39:28 +00:00
Brian Gaeke
45f3af8d88 Update lists of failing benchmarks (except C++...something is the
matter with my sparcv8 libstdc++.a) and to-do list.

llvm-svn: 18755
2004-12-10 08:39:27 +00:00
Brian Gaeke
1da3720799 Emit correct prototype for __builtin_alloca on V8.
llvm-svn: 18745
2004-12-10 05:44:45 +00:00
Brian Gaeke
91cf4fe1ca Add SparcV8 target back into the build
llvm-svn: 18738
2004-12-10 04:54:21 +00:00
Brian Gaeke
5c8cefdf9a Adjust paths: Sparc/V8 --> SparcV8
llvm-svn: 18737
2004-12-10 04:48:57 +00:00
Brian Gaeke
262fe40da0 Make this file self-contained.
llvm-svn: 18736
2004-12-10 04:46:30 +00:00
Brian Gaeke
ef7289195a Update list of failing MultiSource benchmarks. It works out to +5 -5, but I
think some of these might be the CFE's fault; a rebuild should come soon.

llvm-svn: 18735
2004-12-10 04:42:46 +00:00
Brian Gaeke
999a5ba9ba When FpMOVDs appeared in pairs, we were mistakenly skipping over the latter of
each pair. I think this fixes that.

One of these days, I swear I'm going to get the hang of C++ iterators.
Really.

llvm-svn: 18734
2004-12-10 04:42:45 +00:00
Brian Gaeke
a196f80d71 We're continuing to make progress on MultiSource.
llvm-svn: 18714
2004-12-09 18:54:31 +00:00
Brian Gaeke
706a0c3988 Bytes and shorts are aligned differently from words.
llvm-svn: 18713
2004-12-09 18:51:02 +00:00
Brian Gaeke
43cb9ee8a4 Fix asm-printing directives (how did we not see this before...apparently,
everything was an int!)

llvm-svn: 18712
2004-12-09 18:51:01 +00:00
Chris Lattner
bf9258ba8b Move lower intrinsics before FP constant emission, in case
intrinsic lowering ever introduces constants.

Rename local symbols before printing function bodies, fixing 255.vortex
with the CBE!!!

llvm-svn: 18534
2004-12-05 06:49:44 +00:00
Chris Lattner
40e7175e44 Fix test/Regression/CodeGen/CBackend/2004-12-03-ExternStatics.ll and
PR472

llvm-svn: 18459
2004-12-03 17:19:10 +00:00
Brian Gaeke
cabac53133 This code rotted - change it to call abort() until someone wants
to rewrite this to use relocations.

llvm-svn: 18453
2004-12-03 06:57:14 +00:00
Tanya Lattner
d24558ac54 When writing the kernel, save the branches until the end. They are still put in the "right place" in the schedule, but sometimes, when folding to make a kernel, instructions are added between branches, which is wrong. To avoid this, we handle branches specially.
llvm-svn: 18450
2004-12-03 05:25:22 +00:00
Chris Lattner
99c8cf8ef8 Fix a regression caused by the previous patch
llvm-svn: 18449
2004-12-03 05:13:15 +00:00
Chris Lattner
eb18ce7a43 The stripping pass as we know it is about to disappear
llvm-svn: 18436
2004-12-02 21:05:01 +00:00
John Criswell
faf1e07531 Reverting revision 1.209.
Including alloca.h on Solaris brings in the prototype of strftime(), which
breaks compilation of CBE generated code.

llvm-svn: 18435
2004-12-02 19:02:49 +00:00
Chris Lattner
316f923a9c Spill/restore X86 floating point stack registers with 64-bits of precision
instead of 80-bits of precision.  This fixes PR467.

This change speeds up fldry on X86 with LLC from 7.32s on apoc to 4.68s.

llvm-svn: 18433
2004-12-02 18:17:31 +00:00
Chris Lattner
dfdd49b7af Consider 64-bit registers to be FP as well.
llvm-svn: 18432
2004-12-02 17:57:21 +00:00
Tanya Lattner
01adb68f38 Reworked branch adding in the prologue. Added a check for infinite loops, which are not modulo scheduled.
llvm-svn: 18419
2004-12-02 07:22:15 +00:00
Tanya Lattner
893f987574 Reverting this patch:
http://mail.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20041122/021428.html

It broke MultiSource/Applications/obsequi

llvm-svn: 18407
2004-12-01 18:27:03 +00:00
Chris Lattner
a94cee5109 Initial support for packed types, contributed by Morten Ofstad
llvm-svn: 18406
2004-12-01 17:14:28 +00:00
Chris Lattner
0abd8109fa Do not let GCC emit a warning for INT64_MIN
llvm-svn: 18398
2004-11-30 21:33:58 +00:00
Brian Gaeke
48f8e8b05a Sparcs behave better if we use <alloca.h> and avoid messing with __builtin_alloca.
llvm-svn: 18397
2004-11-30 21:27:01 +00:00
Brian Gaeke
289f3d3cc4 Update list of failing benchmarks.
llvm-svn: 18384
2004-11-30 08:15:44 +00:00
Brian Gaeke
2d52bbf6eb If we're about to emit something like:
  %f0 = fmovs %f0
  %f1 = fmovs %f1

then just delete the FpMOVD pseudo-instruction instead.  Also, add
statistics and debug printouts.

llvm-svn: 18383
2004-11-30 08:15:15 +00:00
Chris Lattner
2231d21dad Fix several bugs in 'op x, imm' handling. Foremost is that we now emit
   addi r3, r3, -1
instead of
   addi r3, r3, 1

for 'sub int X, 1'.

Secondarily, this fixes several cases where we could crash given an unsigned
constant.  And fixes a couple of minor missed optimization cases, such as
xor X, ~0U -> not X

llvm-svn: 18379
2004-11-30 07:30:20 +00:00
Chris Lattner
1e093bfb2b Fix CodeGen/PowerPC/2004-11-30-shr-var-crash.ll
llvm-svn: 18376
2004-11-30 06:40:04 +00:00
Chris Lattner
629965fbe0 Fix test/Regression/CodeGen/PowerPC/2004-11-29-ShrCrash.ll
llvm-svn: 18374
2004-11-30 06:36:11 +00:00
Chris Lattner
23a2a6e5d3 Fix test/Regression/CodeGen/PowerPC/2004-11-30-shift-crash.ll
llvm-svn: 18371
2004-11-30 06:29:10 +00:00
Chris Lattner
679cc8deab Remove extraneous namespacification. In particular, don't define llvm::llvm::createInternalGlobalMapperPass
llvm-svn: 18365
2004-11-30 00:22:59 +00:00
Chris Lattner
9c400f3b28 Revamp long/ulong comparisons to use a much more efficient sequence (thanks
to Brian and the Sun compiler for pointing out that the obvious works :)

This also enables folding all long comparisons into setcc and branch
instructions: before we could only do == and !=

For example, for:
void test(unsigned long long A, unsigned long long B) {
   if (A < B) foo();
 }

We now generate:

test:
        subl $4, %esp
        movl %esi, (%esp)
        movl 8(%esp), %eax
        movl 12(%esp), %ecx
        movl 16(%esp), %edx
        movl 20(%esp), %esi
        subl %edx, %eax
        sbbl %esi, %ecx
        jae .LBBtest_2  # UnifiedReturnBlock
.LBBtest_1:     # then
        call foo
        movl (%esp), %esi
        addl $4, %esp
        ret
.LBBtest_2:     # UnifiedReturnBlock
        movl (%esp), %esi
        addl $4, %esp
        ret

Instead of:

test:
        subl $12, %esp
        movl %esi, 8(%esp)
        movl %ebx, 4(%esp)
        movl 16(%esp), %eax
        movl 20(%esp), %ecx
        movl 24(%esp), %edx
        movl 28(%esp), %esi
        cmpl %edx, %eax
        setb %al
        cmpl %esi, %ecx
        setb %bl
        cmove %ax, %bx
        testb %bl, %bl
        je .LBBtest_2   # UnifiedReturnBlock
.LBBtest_1:     # then
        call foo
        movl 4(%esp), %ebx
        movl 8(%esp), %esi
        addl $12, %esp
        ret
.LBBtest_2:     # UnifiedReturnBlock
        movl 4(%esp), %ebx
        movl 8(%esp), %esi
        addl $12, %esp
        ret

llvm-svn: 18330
2004-11-29 05:55:24 +00:00
Tanya Lattner
4b705a5897 Reworked branching so we don't handle BAs specially. It just updates the branchTO regardless of what type of branch it is.
llvm-svn: 18322
2004-11-29 04:39:47 +00:00
Tanya Lattner
819de3ccd4 Fixed a bug where instructions in the kernel were not ordered correctly to preserve dependencies in a cycle.
llvm-svn: 18314
2004-11-28 23:36:15 +00:00
Chris Lattner
1de5cd0fd0 The LLVM bool type shall have 1 byte alignment on PPC.
llvm-svn: 18311
2004-11-28 21:16:45 +00:00
Nate Begeman
6048139b1f Remove the ISel->AsmPrinter link via the TargetMachine that was put in
place to help bring up the PowerPC back end on Darwin.  This code no
longer serves any purpose now that the AsmPrinter does the right thing
all the time printing GlobalValues.  --Cruft.

llvm-svn: 18267
2004-11-27 04:45:11 +00:00