mirror of https://github.com/RPCS3/llvm-mirror.git synced 2024-10-30 07:22:55 +01:00
Commit Graph

1717 Commits

Author SHA1 Message Date
Chris Lattner
63450e87d9 add back the optimization that Nate added for shl X, (zext_inreg y)
llvm-svn: 21273
2005-04-13 02:58:13 +00:00
Chris Lattner
759afe07d7 Oops, remove these too.
llvm-svn: 21272
2005-04-13 02:47:57 +00:00
Chris Lattner
4f188f949c Instead of making ZERO_EXTEND_INREG nodes, use the helper method in
SelectionDAG to do the job with AND.  Don't legalize Z_E_I anymore as
it is gone

llvm-svn: 21266
2005-04-13 02:38:47 +00:00
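
Why the AND helper is enough for the commit above: zero-extending "in register" from a narrower type simply clears every bit above that type's width, which is exactly an AND with a low-bit mask. A minimal standalone C++ check of the identity (the function name is illustrative, not from the compiler source):

    #include <cassert>
    #include <cstdint>

    // zero_extend_inreg from i16 inside an i32 register behaves like AND 0xFFFF.
    static uint32_t zextInRegFromI16(uint32_t x) {
      return (uint32_t)(uint16_t)x;   // keep the low 16 bits, clear the rest
    }

    int main() {
      const uint32_t vals[] = {0u, 0x1234u, 0xFFFFu, 0x8000FFFFu, 0xDEADBEEFu};
      for (uint32_t x : vals)
        assert(zextInRegFromI16(x) == (x & 0xFFFFu));
      return 0;
    }
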
Chris Lattner
bce0030a88 Remove all foldings of ZERO_EXTEND_INREG, moving them to work for AND nodes
instead.  Overall, this increases the amount of folding we can do.

llvm-svn: 21265
2005-04-13 02:38:18 +00:00
Nate Begeman
38d8248a9e Fold shift x, [sz]ext(y) -> shift x, y
llvm-svn: 21262
2005-04-12 23:32:28 +00:00
Nate Begeman
a56527ea5f Fold shift by size larger than type size to undef
Make llvm undef values generate ISD::UNDEF nodes

llvm-svn: 21261
2005-04-12 23:12:17 +00:00
Chris Lattner
58f72ab722 promote extload i1 -> extload i8
llvm-svn: 21258
2005-04-12 20:30:10 +00:00
Chris Lattner
cfc7093ca6 Remove some redundant checks, add a couple of new ones. This allows us to
compile this:

int foo (unsigned long a, unsigned long long g) {
  return a >= g;
}

To:

foo:
        movl 8(%esp), %eax
        cmpl %eax, 4(%esp)
        setae %al
        cmpl $0, 12(%esp)
        sete %cl
        andb %al, %cl
        movzbl %cl, %eax
        ret

instead of:

foo:
        movl 8(%esp), %eax
        cmpl %eax, 4(%esp)
        setae %al
        movzbw %al, %cx
        movl 12(%esp), %edx
        cmpl $0, %edx
        sete %al
        movzbw %al, %ax
        cmpl $0, %edx
        cmove %cx, %ax
        movzbl %al, %eax
        ret

llvm-svn: 21244
2005-04-12 02:54:39 +00:00
Chris Lattner
61f353dbdc Emit comparisons against the sign bit better. Codegen this:
bool %test1(long %X) {
        %A = setlt long %X, 0
        ret bool %A
}

like this:

test1:
        cmpl $0, 8(%esp)
        setl %al
        movzbl %al, %eax
        ret

instead of:

test1:
        movl 8(%esp), %ecx
        cmpl $0, %ecx
        setl %al
        movzbw %al, %ax
        cmpl $0, 4(%esp)
        setb %dl
        movzbw %dl, %dx
        cmpl $0, %ecx
        cmove %dx, %ax
        movzbl %al, %eax
        ret

llvm-svn: 21243
2005-04-12 02:19:10 +00:00
Chris Lattner
6cbbb55967 Emit long comparison against -1 better. Instead of this (x86):
test2:
        movl 8(%esp), %eax
        notl %eax
        movl 4(%esp), %ecx
        notl %ecx
        orl %eax, %ecx
        cmpl $0, %ecx
        sete %al
        movzbl %al, %eax
        ret

or this (PPC):

_test2:
        nor r2, r4, r4
        nor r3, r3, r3
        or r2, r2, r3
        cntlzw r2, r2
        srwi r3, r2, 5
        blr

Emit this:

test2:
        movl 8(%esp), %eax
        andl 4(%esp), %eax
        cmpl $-1, %eax
        sete %al
        movzbl %al, %eax
        ret

or this:

_test2:
.LBB_test2_0:   ;
        and r2, r4, r3
        cmpwi cr0, r2, -1
        li r3, 1
        li r2, 0
        beq .LBB_test2_2        ;
.LBB_test2_1:   ;
        or r3, r2, r2
.LBB_test2_2:   ;
        blr

it seems like the PPC isel could do better for the R32 == -1 case.

llvm-svn: 21242
2005-04-12 01:46:05 +00:00
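
The rewrite above is De Morgan's law in disguise: (~lo | ~hi) is zero exactly when both halves are all ones, i.e. when (lo & hi) equals -1. A small standalone check of that equivalence (names are illustrative, not from the backend):

    #include <cassert>
    #include <cstdint>

    // Old lowering of 'seteq long %X, -1': invert both halves, OR, compare to 0.
    static bool viaNorOr(uint32_t lo, uint32_t hi) { return (~lo | ~hi) == 0; }

    // New lowering: AND the halves together and compare against all ones.
    static bool viaAnd(uint32_t lo, uint32_t hi) { return (lo & hi) == 0xFFFFFFFFu; }

    int main() {
      const uint32_t vals[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
      for (uint32_t lo : vals)
        for (uint32_t hi : vals)
          assert(viaNorOr(lo, hi) == viaAnd(lo, hi));
      return 0;
    }
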
Chris Lattner
37534d43d0 canonicalize x <u 1 -> x == 0. On this testcase:
unsigned long long g;
unsigned long foo (unsigned long a) {
  return (a >= g) ? 1 : 0;
}

It changes the ppc code from:

_foo:
.LBB_foo_0:     ; entry
        mflr r11
        stw r11, 8(r1)
        bl "L00000$pb"
"L00000$pb":
        mflr r2
        addis r2, r2, ha16(L_g$non_lazy_ptr-"L00000$pb")
        lwz r2, lo16(L_g$non_lazy_ptr-"L00000$pb")(r2)
        lwz r4, 0(r2)
        lwz r2, 4(r2)
        cmplw cr0, r3, r2
        li r2, 1
        li r3, 0
        bge .LBB_foo_2  ; entry
.LBB_foo_1:     ; entry
        or r2, r3, r3
.LBB_foo_2:     ; entry
        cmplwi cr0, r4, 1
        li r3, 1
        li r5, 0
        blt .LBB_foo_4  ; entry
.LBB_foo_3:     ; entry
        or r3, r5, r5
.LBB_foo_4:     ; entry
        cmpwi cr0, r4, 0
        beq .LBB_foo_6  ; entry
.LBB_foo_5:     ; entry
        or r2, r3, r3
.LBB_foo_6:     ; entry
        rlwinm r3, r2, 0, 31, 31
        lwz r11, 8(r1)
        mtlr r11
        blr


to:

_foo:
.LBB_foo_0:     ; entry
        mflr r11
        stw r11, 8(r1)
        bl "L00000$pb"
"L00000$pb":
        mflr r2
        addis r2, r2, ha16(L_g$non_lazy_ptr-"L00000$pb")
        lwz r2, lo16(L_g$non_lazy_ptr-"L00000$pb")(r2)
        lwz r4, 0(r2)
        lwz r2, 4(r2)
        cmplw cr0, r3, r2
        li r2, 1
        li r3, 0
        bge .LBB_foo_2  ; entry
.LBB_foo_1:     ; entry
        or r2, r3, r3
.LBB_foo_2:     ; entry
        cntlzw r3, r4
        srwi r3, r3, 5
        cmpwi cr0, r4, 0
        beq .LBB_foo_4  ; entry
.LBB_foo_3:     ; entry
        or r2, r3, r3
.LBB_foo_4:     ; entry
        rlwinm r3, r2, 0, 31, 31
        lwz r11, 8(r1)
        mtlr r11
        blr

llvm-svn: 21241
2005-04-12 00:28:49 +00:00
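
The canonicalization above is valid because zero is the only unsigned value strictly below one; rewriting the compare as an equality with zero is what lets the PPC code use cntlzw/srwi instead of a branch. Illustrative check:

    #include <cstdint>

    // For any unsigned x:  (x <u 1)  <==>  (x == 0).
    constexpr bool ltuOne(uint32_t x) { return x < 1u; }
    constexpr bool eqZero(uint32_t x) { return x == 0u; }

    static_assert(ltuOne(0u) == eqZero(0u), "zero case");
    static_assert(ltuOne(1u) == eqZero(1u), "boundary case");
    static_assert(ltuOne(0xFFFFFFFFu) == eqZero(0xFFFFFFFFu), "large case");

    int main() { return 0; }
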
Chris Lattner
7f0f0854fa Teach the dag mechanism that this:
long long test2(unsigned A, unsigned B) {
        return ((unsigned long long)A << 32) + B;
}

is equivalent to this:

long long test1(unsigned A, unsigned B) {
        return ((unsigned long long)A << 32) | B;
}

Now they are both codegen'd to this on ppc:

_test2:
        blr

or this on x86:

test2:
        movl 4(%esp), %edx
        movl 8(%esp), %eax
        ret

llvm-svn: 21231
2005-04-11 20:29:59 +00:00
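
The equivalence taught above holds because B is zero-extended to 64 bits, so its set bits never overlap the shifted-in high half and the addition can never carry into it; ADD and OR of disjoint bit ranges are the same operation. A quick standalone check (assuming, as in the testcase, that B is a 32-bit value):

    #include <cassert>
    #include <cstdint>

    static uint64_t viaAdd(uint32_t A, uint32_t B) { return ((uint64_t)A << 32) + B; }
    static uint64_t viaOr (uint32_t A, uint32_t B) { return ((uint64_t)A << 32) | B; }

    int main() {
      const uint32_t vals[] = {0u, 1u, 0x80000000u, 0xFFFFFFFFu};
      for (uint32_t A : vals)
        for (uint32_t B : vals)
          assert(viaAdd(A, B) == viaOr(A, B));   // disjoint bits: no carries possible
      return 0;
    }
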
Chris Lattner
71f3d4ce57 Fix expansion of shifts by exactly NVT bits on arch's (like X86) that have
masking shifts.

This fixes the miscompilation of this:

long long test1(unsigned A, unsigned B) {
        return ((unsigned long long)A << 32) | B;
}

into this:

test1:
        movl 4(%esp), %edx
        movl %edx, %eax
        orl 8(%esp), %eax
        ret

allowing us to generate this instead:

test1:
        movl 4(%esp), %edx
        movl 8(%esp), %eax
        ret

llvm-svn: 21230
2005-04-11 20:08:52 +00:00
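
The bug being fixed above comes from the hardware: x86 masks a 32-bit shift count to 5 bits, so a count of exactly 32 acts like a shift by 0 instead of producing zero. When a 64-bit shift is expanded into two 32-bit halves, the count-equals-32 case therefore needs explicit handling. A hedged sketch of the expansion in plain C++ (not the legalizer's actual code):

    #include <cassert>
    #include <cstdint>

    // Expand a 64-bit left shift into 32-bit halves.  Note that the amt >= 32
    // case never performs a 32-bit shift by 32, which masking hardware would
    // silently turn into a shift by 0.
    static uint64_t shl64(uint32_t lo, uint32_t hi, unsigned amt) {
      uint32_t newLo, newHi;
      if (amt == 0) {
        newLo = lo; newHi = hi;
      } else if (amt < 32) {                 // bits cross from lo into hi
        newHi = (hi << amt) | (lo >> (32 - amt));
        newLo = lo << amt;
      } else {                               // amt in [32, 63]: lo becomes the new hi
        newHi = lo << (amt - 32);
        newLo = 0;
      }
      return ((uint64_t)newHi << 32) | newLo;
    }

    int main() {
      const uint64_t x = 0x123456789ABCDEF0ULL;
      const uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
      const unsigned amts[] = {0, 1, 31, 32, 33, 63};
      for (unsigned amt : amts)
        assert(shl64(lo, hi, amt) == (x << amt));
      return 0;
    }
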
Nate Begeman
32163963cb Fix libcall code to not pass a NULL Chain to LowerCallTo
Fix libcall code to not crash or assert looking for an ADJCALLSTACKUP node
  when it is known that there is no ADJCALLSTACKDOWN to match.
Expand i64 multiply when ISD::MULHU is legal for the target.

llvm-svn: 21214
2005-04-11 03:01:51 +00:00
Chris Lattner
4f26677dc9 Don't bother sign/zext_inreg'ing the result of an and operation if we know
the result doesn't change as a result of the extend.

This improves codegen for Alpha on this testcase:

int %a(ushort* %i) {
        %tmp.1 = load ushort* %i
        %tmp.2 = cast ushort %tmp.1 to int
        %tmp.4 = and int %tmp.2, 1
        ret int %tmp.4
}

Generating:

a:
        ldgp $29, 0($27)
        ldwu $0,0($16)
        and $0,1,$0
        ret $31,($26),1

instead of:

a:
        ldgp $29, 0($27)
        ldwu $0,0($16)
        and $0,1,$0
        addl $0,0,$0
        ret $31,($26),1

btw, alpha really should switch to livein/outs for args :)

llvm-svn: 21213
2005-04-10 23:37:16 +00:00
Chris Lattner
c730ea00e2 Teach legalize to deal with targets that don't support some SEXTLOAD/ZEXTLOADs
llvm-svn: 21212
2005-04-10 22:54:25 +00:00
Chris Lattner
1b9e1e26cb don't zextload fp values!
llvm-svn: 21209
2005-04-10 17:40:35 +00:00
Chris Lattner
0c089eae41 Until we have a dag combiner, promote using zextload's instead of extloads.
This gives the optimizer a bit of information about the top-part of the
value.

llvm-svn: 21205
2005-04-10 04:33:47 +00:00
Chris Lattner
9d13d0b958 Fold zext_inreg(zextload), likewise for sext's
llvm-svn: 21204
2005-04-10 04:33:08 +00:00
Chris Lattner
9c8fe594e5 add a simple xform
llvm-svn: 21203
2005-04-10 04:04:49 +00:00
Chris Lattner
b3518a838c Fix a thinko. If the operand is promoted, pass the promoted value into
the new zero extend, not the original operand.  This fixes cast bool -> long
on ppc.

Add an unrelated fixme

llvm-svn: 21196
2005-04-10 01:13:15 +00:00
Chris Lattner
034716de24 add a little peephole optimization. This allows us to codegen:
int a(short i) {
        return i & 1;
}

as

_a:
        andi. r3, r3, 1
        blr

instead of:

_a:
        rlwinm r2, r3, 0, 16, 31
        andi. r3, r2, 1
        blr

on ppc.  It should also help the other risc targets.

llvm-svn: 21189
2005-04-09 21:43:54 +00:00
Chris Lattner
77ab286605 there is no need to remove this instruction; linscan does it already as it
removes noop moves.

llvm-svn: 21183
2005-04-09 16:24:20 +00:00
Chris Lattner
f408e9a07b Adjust live intervals to support a livein set
llvm-svn: 21182
2005-04-09 16:17:50 +00:00
Chris Lattner
1a9c8fc64a Consider the livein/out set for a function, allowing targets to not have to
use ugly imp_def/imp_uses for arguments and return values.

llvm-svn: 21180
2005-04-09 15:23:25 +00:00
Chris Lattner
afa0001d54 recognize some patterns as fabs operations, so that fabs at the source level
is deconstructed then reconstructed here.  This catches 19 fabs's in 177.mesa,
9 in 168.wupwise, 5 in 171.swim, 3 in 172.mgrid, and 14 in 173.applu out of
specfp2000.

This allows the X86 code generator to make MUCH better code than before for
each of these and saves one instr on ppc.

This depends on the previous CFE patch to expose these correctly.

llvm-svn: 21171
2005-04-09 05:15:53 +00:00
Chris Lattner
8e6eafa8e1 Emit BRCONDTWOWAY when possible.
llvm-svn: 21167
2005-04-09 03:30:29 +00:00
Chris Lattner
55b73bda6c Legalize BRCONDTWOWAY into a BRCOND/BR pair if a target doesn't support it.
llvm-svn: 21166
2005-04-09 03:30:19 +00:00
Chris Lattner
da902bdf1b print and fold BRCONDTWOWAY correctly
llvm-svn: 21165
2005-04-09 03:27:28 +00:00
Chris Lattner
31170cd2ec canonicalize a bunch of operations involving fneg
llvm-svn: 21160
2005-04-09 03:02:46 +00:00
Chris Lattner
9a56ef5693 If a target zero or sign extends the result of its setcc, allow folding of
this into sign/zero extension instructions later.

On PPC, for example, this testcase:

%G = external global sbyte
implementation
void %test(int %X, int %Y) {
  %C = setlt int %X, %Y
  %D = cast bool %C to sbyte
  store sbyte %D, sbyte* %G
  ret void
}

Now codegens to:

        cmpw cr0, r3, r4
        li r3, 1
        li r4, 0
        blt .LBB_test_2 ;
.LBB_test_1:    ;
        or r3, r4, r4
.LBB_test_2:    ;
        addis r2, r2, ha16(L_G$non_lazy_ptr-"L00000$pb")
        lwz r2, lo16(L_G$non_lazy_ptr-"L00000$pb")(r2)
        stb r3, 0(r2)

instead of:

        cmpw cr0, r3, r4
        li r3, 1
        li r4, 0
        blt .LBB_test_2 ;
.LBB_test_1:    ;
        or r3, r4, r4
.LBB_test_2:    ;
***     rlwinm r3, r3, 0, 31, 31
        addis r2, r2, ha16(L_G$non_lazy_ptr-"L00000$pb")
        lwz r2, lo16(L_G$non_lazy_ptr-"L00000$pb")(r2)
        stb r3, 0(r2)

llvm-svn: 21148
2005-04-07 19:43:53 +00:00
Chris Lattner
bbe0e9e9db Remove something I had for testing
llvm-svn: 21144
2005-04-07 18:58:54 +00:00
Chris Lattner
ee836c7b32 This patch does two things. First, it canonicalizes 'X >= C' -> 'X > C-1'
(likewise for <=, >=u, <=u).

Second, it implements a special case hack to turn 'X gtu SINTMAX' -> 'X lt 0'

On powerpc, for example, this changes this:

        lis r2, 32767
        ori r2, r2, 65535
        cmplw cr0, r3, r2
        bgt .LBB_test_2

into:

        cmpwi cr0, r3, 0
        blt .LBB_test_2

llvm-svn: 21142
2005-04-07 18:14:58 +00:00
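
Both rewrites above are simple integer identities: over the integers, X >= C holds exactly when X > C-1 (provided C-1 does not wrap below the type's minimum), and an unsigned value exceeds SINTMAX exactly when its top bit is set, i.e. when it is negative reinterpreted as signed (two's complement assumed). Illustrative check:

    #include <cassert>
    #include <cstdint>

    // X >u 0x7FFFFFFF  <==>  (int)X <s 0   (only the sign bit matters).
    static bool gtuSignedMax(uint32_t x) { return x > 0x7FFFFFFFu; }
    static bool ltsZero(uint32_t x)      { return (int32_t)x < 0; }

    int main() {
      const uint32_t vals[] = {0u, 1u, 4u, 5u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
      for (uint32_t x : vals) {
        assert(gtuSignedMax(x) == ltsZero(x));
        assert((x >= 5u) == (x > 4u));       // 'X >= C' <==> 'X > C-1' for C = 5
      }
      return 0;
    }
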
Chris Lattner
22bbc2351e Fix a really scary bug that Nate found where we weren't deleting the right
elements out of the autoCSE maps.

llvm-svn: 21128
2005-04-07 00:30:13 +00:00
Nate Begeman
7898fc8cc8 Teach ExpandShift how to handle shifts by a constant. This allows targets
like PowerPC to codegen long shifts in many fewer instructions.

llvm-svn: 21122
2005-04-06 21:13:14 +00:00
Nate Begeman
4457b4994c Expand SREM and UREM for targets that claim not to have them, like PowerPC
llvm-svn: 21103
2005-04-06 00:23:54 +00:00
Nate Begeman
12af81407b Add MULHU and MULHS nodes for the high part of an (un)signed 32x32=64b
multiply.

llvm-svn: 21102
2005-04-05 22:36:56 +00:00
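
MULHU and MULHS denote the upper 32 bits of a widening 32x32->64 multiply, which is what hardware "multiply high" instructions return. A sketch of their semantics in portable C++ (the 64-bit intermediate and arithmetic right shift are just for illustration):

    #include <cassert>
    #include <cstdint>

    // ISD::MULHU semantics: high 32 bits of an unsigned 32x32 -> 64 multiply.
    static uint32_t mulhu(uint32_t a, uint32_t b) {
      return (uint32_t)(((uint64_t)a * b) >> 32);
    }

    // ISD::MULHS semantics: high 32 bits of a signed 32x32 -> 64 multiply
    // (arithmetic shift of the signed product assumed).
    static int32_t mulhs(int32_t a, int32_t b) {
      return (int32_t)(((int64_t)a * b) >> 32);
    }

    int main() {
      assert(mulhu(0xFFFFFFFFu, 0xFFFFFFFFu) == 0xFFFFFFFEu);
      assert(mulhs(-2, 0x40000000) == -1);   // -2 * 2^30 = -2^31, high word is all ones
      return 0;
    }
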
Chris Lattner
f81edb57b6 Make sure to notice that explicit physregs are used in the function
llvm-svn: 21084
2005-04-04 21:35:34 +00:00
Nate Begeman
a8be5b976f Handle expanding arguments to ISD::TRUNCATE. This happens on PowerPC when
you have something like i16 = truncate i64.  This fixes Regression/C/casts

llvm-svn: 21073
2005-04-04 00:57:08 +00:00
Chris Lattner
a8bccb73cd Fix sign_extend and zero_extend of promoted value types to expanded value
types.  This occurs when casting short to long on PPC for example.

llvm-svn: 21072
2005-04-03 23:41:52 +00:00
Duraid Madina
3a10f491f0 add support for prefix/suffix strings to go around GlobalValue(s)
(which may or may not be function pointers) in the asmprinter. For the moment,
this changes nothing, except the IA64 backend which can use this to write:

  data8.ua  @fptr(blah__blah__mangled_function_name)

  (by setting FunctionAddrPrefix/Suffix to "@fptr(" / ")")

llvm-svn: 21024
2005-04-02 12:21:51 +00:00
Chris Lattner
1a15f58a92 transform fabs/fabsf calls into FABS nodes.
llvm-svn: 21014
2005-04-02 05:26:53 +00:00
Chris Lattner
206a694a7b Expand fabs into fneg
llvm-svn: 21013
2005-04-02 05:26:37 +00:00
Chris Lattner
fcf6ee0a8b Turn -0.0 - X -> fneg
llvm-svn: 21011
2005-04-02 05:04:50 +00:00
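
The reason the fold above starts from -0.0 rather than 0.0: subtracting from positive zero is not a sign flip (0.0 - 0.0 is +0.0), while subtracting from negative zero matches fneg bit for bit. A tiny IEEE sanity check:

    #include <cassert>
    #include <cmath>

    int main() {
      double x = 0.0;
      assert(!std::signbit(0.0 - x));                       // 0.0 - X is +0.0 here
      assert(std::signbit(-0.0 - x));                       // -0.0 - X flips the sign
      assert(std::signbit(-x) == std::signbit(-0.0 - x));   // i.e. it behaves like fneg
      return 0;
    }
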
Chris Lattner
8644181cd6 Several changes mixed up here. First, when legalizing a DAG with pcmarker,
don't regen the whole dag if unnecessary.  Second, fix an ugly bug with
the _PARTS nodes that caused legalize to produce multiples of them.
Finally, implement initial support for FABS and FNEG.  Currently FNEG is
the only one to be trusted though.

llvm-svn: 21009
2005-04-02 05:00:07 +00:00
Chris Lattner
c8f36868e6 print fneg/fabs
llvm-svn: 21008
2005-04-02 04:58:41 +00:00
Chris Lattner
8be5696874 fix some bugs in the implementation of SHL_PARTS and friends.
llvm-svn: 21004
2005-04-02 04:00:59 +00:00
Chris Lattner
964ab5d408 Turn expanded shift operations into (e.g.) SHL_PARTS if the target supports it.
llvm-svn: 21002
2005-04-02 03:38:53 +00:00
Chris Lattner
33ca1ce8e0 Print some new nodes
llvm-svn: 21001
2005-04-02 03:30:42 +00:00
Chris Lattner
20027c6b30 Fix a bug when inserting a libcall into a function with no other calls.
llvm-svn: 20999
2005-04-02 03:22:40 +00:00
Nate Begeman
893f5729ce Fix a warning about an unhandled switch case
llvm-svn: 20994
2005-04-02 00:41:14 +00:00
Nate Begeman
4034852ba9 Add ISD::UNDEF node
Teach the SelectionDAG code how to expand and promote it
Have PPC32 LowerCallTo generate ISD::UNDEF for int arg regs used up by fp
  arguments, but not shadowing their value.  This allows us to do the right
  thing with both fixed and vararg floating point arguments.

llvm-svn: 20988
2005-04-01 22:34:39 +00:00
Chris Lattner
c81870e4e6 print the machine CFG in the -print-machineinstrs dump
llvm-svn: 20976
2005-04-01 06:48:38 +00:00
Andrew Lenharth
7db3834ecf PCMarker support for DAG and Alpha
llvm-svn: 20965
2005-03-31 21:24:06 +00:00
Chris Lattner
abb59a3c21 Instead of setting up the CFG edges at selectiondag construction time, set
them up after the code has been emitted.  This allows targets to select one
mbb as multiple mbb's as needed.

llvm-svn: 20937
2005-03-30 01:10:47 +00:00
Chris Lattner
02a4d3bd9b Fix a bug that andrew noticed where we do not correctly sign/zero extend
returned integer values all of the way to 64-bits (we only did it to 32-bits
leaving the top bits undefined).  This causes problems for targets like alpha
whose ABIs define the top bits too.

llvm-svn: 20926
2005-03-29 19:09:56 +00:00
Chris Lattner
185e7e2c22 implement legalization of build_pair for nate
llvm-svn: 20901
2005-03-28 22:03:13 +00:00
Andrew Lenharth
c287cd1e4e First step in adding pcmarker intrinsic. Second step (soon) is adding backend support.
llvm-svn: 20900
2005-03-28 20:05:49 +00:00
Nate Begeman
f821401825 Change interface to LowerCallTo to take a boolean isVarArg argument.
llvm-svn: 20842
2005-03-26 01:29:23 +00:00
Chris Lattner
c9a3ea81bf Fix the missing symbols problem Bill was hitting. Patch contributed by
Bill Wendling!!

llvm-svn: 20649
2005-03-17 15:38:16 +00:00
Chris Lattner
4b688a1c70 This mega patch converts us from using Function::a{iterator|begin|end} to
using Function::arg_{iterator|begin|end}.  Likewise Module::g* -> Module::global_*.

This patch is contributed by Gabor Greif, thanks!

llvm-svn: 20597
2005-03-15 04:54:21 +00:00
Chris Lattner
4422ffd421 I didn't mean to check this in. :(
llvm-svn: 20555
2005-03-10 20:59:51 +00:00
Chris Lattner
fa9e43b38c Fix a bug where we would incorrectly do a sign ext instead of a zero ext
because we were checking the wrong thing.  Thanks to andrew for pointing
this out!

llvm-svn: 20554
2005-03-10 20:55:51 +00:00
Chris Lattner
ea2e61b83a Allow the live interval analysis pass to be a bit more aggressive about
numbering values in live ranges for physical registers.

The alpha backend currently generates code that looks like this:

  vreg = preg
...
  preg = vreg
  use preg
...
  preg = vreg
  use preg

etc.  Because vreg contains the value of preg coming in, each of the
copies back into preg contain that initial value as well.

In the case of the Alpha, this allows this testcase:

void "foo"(int %blah) {
        store int 5, int *%MyVar
        store int 12, int* %MyVar2
        ret void
}

to compile to:

foo:
        ldgp $29, 0($27)
        ldiq $0,5
        stl $0,MyVar
        ldiq $0,12
        stl $0,MyVar2
        ret $31,($26),1

instead of:

foo:
        ldgp $29, 0($27)
        bis $29,$29,$0
        ldiq $1,5
        bis $0,$0,$29
        stl $1,MyVar
        ldiq $1,12
        bis $0,$0,$29
        stl $1,MyVar2
        ret $31,($26),1

This does not seem to have any noticeable effect on X86 code.

This fixes PR535.

llvm-svn: 20536
2005-03-09 23:05:19 +00:00
Chris Lattner
e0d0c64c8a constant fold FP_ROUND_INREG, ZERO_EXTEND_INREG, and SIGN_EXTEND_INREG
This allows the alpha backend to compile:

bool %test(uint %P) {
        %c = seteq uint %P, 0
        ret bool %c
}

into:

test:
        ldgp $29, 0($27)
        ZAP $16,240,$0
        CMPEQ $0,0,$0
        AND $0,1,$0
        ret $31,($26),1

instead of:

test:
        ldgp $29, 0($27)
        ZAP $16,240,$0
        ldiq $1,0
        ZAP $1,240,$1
        CMPEQ $0,$1,$0
        AND $0,1,$0
        ret $31,($26),1

... and fixes PR534.

llvm-svn: 20534
2005-03-09 18:37:12 +00:00
Alkis Evlogimenos
422af394b6 Lower llvm.isunordered(a, b) into a != a | b != b.
llvm-svn: 20382
2005-03-01 02:07:58 +00:00
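
The lowering above works because NaN is the only floating-point value that compares unequal to itself, so "a != a" is precisely "a is NaN", and a pair is unordered exactly when at least one side is NaN. Illustrative check against the library predicate:

    #include <cassert>
    #include <cmath>
    #include <limits>

    static bool loweredIsUnordered(double a, double b) {
      return (a != a) | (b != b);            // NaN is the only self-unequal value
    }

    int main() {
      const double nan = std::numeric_limits<double>::quiet_NaN();
      assert(loweredIsUnordered(nan, 1.0) == std::isunordered(nan, 1.0));
      assert(loweredIsUnordered(1.0, nan) == std::isunordered(1.0, nan));
      assert(loweredIsUnordered(1.0, 2.0) == std::isunordered(1.0, 2.0));
      return 0;
    }
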
Chris Lattner
9ccfcab3db Lower prefetch to a noop, patch contributed by Justin Wick!
llvm-svn: 20375
2005-02-28 19:27:23 +00:00
Chris Lattner
4ba91f5168 Fix a bug in the 'store fpimm, ptr' -> 'store intimm, ptr' handling code.
Changing 'op' here caused us to not enter the store into a map, causing
reemission of the code!!  In practice, a simple loop like this:

no_exit:                ; preds = %no_exit, %entry
        %indvar = phi uint [ %indvar.next, %no_exit ], [ 0, %entry ]            ; <uint> [#uses=3]
        %tmp.4 = getelementptr "complex long double"* %P, uint %indvar, uint 0          ; <double*> [#uses=1]
        store double 0.000000e+00, double* %tmp.4
        %indvar.next = add uint %indvar, 1              ; <uint> [#uses=2]
        %exitcond = seteq uint %indvar.next, %N         ; <bool> [#uses=1]
        br bool %exitcond, label %return, label %no_exit

was being code gen'd to:

.LBBtest_1:     # no_exit
        movl %edx, %esi
        shll $4, %esi
        movl $0, 4(%eax,%esi)
        movl $0, (%eax,%esi)
        incl %edx
        movl $0, (%eax,%esi)
        movl $0, 4(%eax,%esi)
        cmpl %ecx, %edx
        jne .LBBtest_1  # no_exit

Note that we are doing 4 32-bit stores instead of 2.  Now we generate:

.LBBtest_1:     # no_exit
        movl %edx, %esi
        incl %esi
        shll $4, %edx
        movl $0, (%eax,%edx)
        movl $0, 4(%eax,%edx)
        cmpl %ecx, %esi
        movl %esi, %edx
        jne .LBBtest_1  # no_exit

This is much happier, though it would be even better if the increment of ESI
was scheduled after the compare :-/

llvm-svn: 20265
2005-02-22 07:23:39 +00:00
Misha Brukman
381d248dc6 Fix compilation errors with VS 2005, patch by Aaron Gray.
llvm-svn: 20231
2005-02-17 21:39:27 +00:00
Chris Lattner
89105cec43 Don't rely on doubles comparing identical to each other, which doesn't work
for 0.0 and -0.0.

llvm-svn: 20230
2005-02-17 20:17:32 +00:00
Chris Lattner
0de03b45ab Don't sink argument loads into loops or other bad places. This disables folding of argument loads with instructions that are not in the entry block.
llvm-svn: 20228
2005-02-17 19:40:32 +00:00
Chris Lattner
43b14db4d9 Print GEP offsets as signed values instead of unsigned values. On X86, this
prints:

getelementptr (int* %A, int -1)

as: "(A) - 4" instead of "(A) + 18446744073709551612", which makes the
assembler much happier.

This fixes test/Regression/CodeGen/X86/2005-02-14-IllegalAssembler.ll,
and Benchmarks/Prolangs-C/cdecl with LLC on X86.

llvm-svn: 20183
2005-02-14 21:40:26 +00:00
Chris Lattner
c808a143af Fix a case where we incorrectly compiled a cast from short to int on 64-bit
targets.

llvm-svn: 20030
2005-02-04 18:39:19 +00:00
Andrew Lenharth
d2d24eee40 fix constant pointer outputting on 64-bit machines
llvm-svn: 20026
2005-02-04 13:47:16 +00:00
Chris Lattner
c3f476e9c2 Fix yet another memset issue.
llvm-svn: 19986
2005-02-02 03:44:41 +00:00
Chris Lattner
9cf60e3459 Fix some bugs andrew noticed legalizing memset for alpha
llvm-svn: 19969
2005-02-01 18:38:28 +00:00
Chris Lattner
382abe80a0 Improve conformance with the Misha spelling benchmark suite
llvm-svn: 19930
2005-01-30 00:09:23 +00:00
Chris Lattner
8200976176 adjust to ilist changes.
llvm-svn: 19924
2005-01-29 18:41:25 +00:00
Chris Lattner
2755fb4171 Alpha doesn't have a native f32 extload instruction.
llvm-svn: 19880
2005-01-28 22:58:25 +00:00
Chris Lattner
da7b5277c1 implement legalization of truncates whose results and sources need to be
truncated, e.g. (truncate:i8 something:i16) on a 32 or 64-bit RISC.

llvm-svn: 19879
2005-01-28 22:52:50 +00:00
Chris Lattner
89cac82479 Get alpha working with memset/memcpy/memmove
llvm-svn: 19878
2005-01-28 22:29:18 +00:00
Chris Lattner
4134789c8f CopyFromReg produces two values. Make sure that we remember that both are
legalized, and actually return the correct result when we legalize the chain first.

llvm-svn: 19866
2005-01-28 06:27:38 +00:00
Chris Lattner
849899e193 Silence optimized warnings.
llvm-svn: 19797
2005-01-23 23:19:44 +00:00
Chris Lattner
65fc8007cd Simplify/speedup the PEI by not having to scan for uses of the callee saved
registers.  This information is computed directly by the register allocator
now.

llvm-svn: 19795
2005-01-23 23:13:12 +00:00
Chris Lattner
556679b89d Update physregsused info.
llvm-svn: 19793
2005-01-23 22:55:45 +00:00
Chris Lattner
cc22be2981 Update this pass to set PhysRegsUsed info in MachineFunction.
llvm-svn: 19792
2005-01-23 22:51:56 +00:00
Chris Lattner
964297fc32 Update these register allocators to set the PhysRegUsed info in MachineFunction.
llvm-svn: 19791
2005-01-23 22:45:13 +00:00
Chris Lattner
6a6d5cf9eb Add support for the PhysRegsUsed array.
llvm-svn: 19789
2005-01-23 22:13:58 +00:00
Chris Lattner
c187b917f2 Speed this up a bit by making ModifiedRegs a vector<char> not vector<bool>
llvm-svn: 19787
2005-01-23 21:45:01 +00:00
Chris Lattner
b3a5fc3ec0 Adjust to changes in SelectionDAG interfaces
The first half of correct chain insertion for libcalls. This is not enough
to fix Fhourstones yet though.

llvm-svn: 19781
2005-01-23 04:42:50 +00:00
Chris Lattner
3165569ba9 Remove the 3 HACK HACK HACKs I put in before, fixing them properly with
the new TLI that is available.

Implement support for handling out of range shifts.  This allows us to
compile this code (a 64-bit rotate):

unsigned long long f3(unsigned long long x) {
  return (x << 32) | (x >> (64-32));
}

into this:

f3:
        mov %EDX, DWORD PTR [%ESP + 4]
        mov %EAX, DWORD PTR [%ESP + 8]
        ret

GCC produces this:

$ gcc t.c -masm=intel -O3 -S -o - -fomit-frame-pointer
..
f3:
        push    %ebx
        mov     %ebx, DWORD PTR [%esp+12]
        mov     %ecx, DWORD PTR [%esp+8]
        mov     %eax, %ebx
        mov     %edx, %ecx
        pop     %ebx
        ret

The Simple ISEL produces (eww gross):

f3:
        sub %ESP, 4
        mov DWORD PTR [%ESP], %ESI
        mov %EDX, DWORD PTR [%ESP + 8]
        mov %ECX, DWORD PTR [%ESP + 12]
        mov %EAX, 0
        mov %ESI, 0
        or %EAX, %ECX
        or %EDX, %ESI
        mov %ESI, DWORD PTR [%ESP]
        add %ESP, 4
        ret

llvm-svn: 19780
2005-01-23 04:39:44 +00:00
Chris Lattner
4c997d281c Adjust to changes in SelectionDAG interface.
llvm-svn: 19779
2005-01-23 04:36:26 +00:00
Chris Lattner
63ec3c402b Get this to work for 64-bit systems.
llvm-svn: 19763
2005-01-22 23:04:37 +00:00
Chris Lattner
29d6389d78 Implicitly defined registers can clobber callee saved registers too!
This fixes the return-address-not-being-saved problem in the Alpha backend.

llvm-svn: 19741
2005-01-22 00:49:16 +00:00
Chris Lattner
97f35a7a07 More bugfixes for IA64 shifts.
llvm-svn: 19739
2005-01-22 00:33:03 +00:00
Chris Lattner
67deea9d05 Fix problems with non-x86 targets.
llvm-svn: 19738
2005-01-22 00:31:52 +00:00
Chris Lattner
42e239ed58 Add a nasty hack to fix Alpha/IA64 multiplies by a power of two.
llvm-svn: 19737
2005-01-22 00:20:42 +00:00
Chris Lattner
e724100870 Remove unneeded line.
llvm-svn: 19736
2005-01-21 23:43:12 +00:00
Chris Lattner
a974e215a5 test commit
llvm-svn: 19735
2005-01-21 23:38:56 +00:00
Chris Lattner
392ddf430b Unary token factor nodes are unneeded.
llvm-svn: 19727
2005-01-21 18:01:22 +00:00
Chris Lattner
07c35617d5 Refactor libcall code a bit. Initial implementation of expanding int -> FP
operations for 64-bit integers.

llvm-svn: 19724
2005-01-21 06:05:23 +00:00
Chris Lattner
6258ec2e1d Simplify the shift-expansion code.
llvm-svn: 19721
2005-01-20 20:29:23 +00:00
Chris Lattner
c95c7c90c9 Expand add/sub into ADD_PARTS/SUB_PARTS instead of a non-existent libcall.
llvm-svn: 19715
2005-01-20 18:52:28 +00:00
Chris Lattner
4086a7a803 implement add_parts/sub_parts.
llvm-svn: 19714
2005-01-20 18:50:55 +00:00
Chris Lattner
e7ce5d0e4c Add missing entry.
llvm-svn: 19712
2005-01-20 17:32:28 +00:00
Chris Lattner
e5212a16a2 Support targets that do not use i8 shift amounts.
llvm-svn: 19707
2005-01-19 22:31:21 +00:00
Chris Lattner
0e7435bc5b Add an assertion that would have made more sense to duraid
llvm-svn: 19704
2005-01-19 21:32:07 +00:00
Chris Lattner
c662697319 Add support for targets that pass args in registers to calls.
llvm-svn: 19703
2005-01-19 20:24:35 +00:00
Chris Lattner
277ac2be70 Fold single use token factor nodes into other token factor nodes.
llvm-svn: 19701
2005-01-19 19:10:54 +00:00
Chris Lattner
85e0771f79 Realize the individual pieces of an expanded copytoreg/store/load are
independent of each other.

llvm-svn: 19700
2005-01-19 18:02:17 +00:00
Chris Lattner
027c97e93e Know some identities about tokenfactor nodes.
llvm-svn: 19699
2005-01-19 18:01:40 +00:00
Chris Lattner
7114e8a527 Know some simple identities. This improves codegen for (1LL << N).
llvm-svn: 19698
2005-01-19 17:29:49 +00:00
Chris Lattner
e97ed92617 Just in case, handle something that is both a use and a def.
llvm-svn: 19696
2005-01-19 17:11:51 +00:00
Chris Lattner
2cb11bd2b9 When an instruction moves, make sure to update the VarInfo::Kills list as
well as all of the other stuff in livevar. This fixes the compiler crash
on fourinarow last night.

llvm-svn: 19695
2005-01-19 17:09:15 +00:00
Chris Lattner
408325ffdf Use the TargetInstrInfo::commuteInstruction method to commute instructions
instead of doing it manually.

llvm-svn: 19685
2005-01-19 07:08:42 +00:00
Chris Lattner
743a36c818 Implement a way of expanding shifts. This applies to targets that offer
select operations or to shifts that are by a constant.  This automatically
implements (with no special code) all of the special cases for shift by 32,
shift by < 32 and shift by > 32.

llvm-svn: 19679
2005-01-19 04:19:40 +00:00
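
The select-based expansion mentioned above computes both the "amount below 32" and "amount at least 32" forms and then picks between them, so no branch is needed and the by-32 / under-32 / over-32 special cases fall out automatically. A hedged C++ sketch of the idea for a 64-bit left shift (the double shift by 1 and 31-a avoids an out-of-range 32-bit shift when the amount is 0):

    #include <cassert>
    #include <cstdint>

    static uint64_t shl64ViaSelect(uint32_t lo, uint32_t hi, unsigned amt) {
      unsigned a = amt & 31;                             // what a masking shifter sees
      // Form valid for amt in [0, 31].
      uint32_t hiSmall = (hi << a) | ((lo >> 1) >> (31 - a));
      uint32_t loSmall = lo << a;
      // Form valid for amt in [32, 63].
      uint32_t hiBig = lo << a;
      uint32_t loBig = 0;
      // The two selects, like ISD::SELECT on the condition (amt < 32).
      uint32_t newHi = (amt < 32) ? hiSmall : hiBig;
      uint32_t newLo = (amt < 32) ? loSmall : loBig;
      return ((uint64_t)newHi << 32) | newLo;
    }

    int main() {
      const uint64_t x = 0xFEDCBA9876543210ULL;
      const uint32_t lo = (uint32_t)x, hi = (uint32_t)(x >> 32);
      for (unsigned amt = 0; amt < 64; ++amt)
        assert(shl64ViaSelect(lo, hi, amt) == (x << amt));
      return 0;
    }
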
Chris Lattner
0df1935505 Zero is cheaper than sign extend.
llvm-svn: 19675
2005-01-18 21:57:59 +00:00
Chris Lattner
4360871e16 Fix some fixmes (promoting bools for select and brcond), fix promotion
of zero and sign extends.

llvm-svn: 19671
2005-01-18 19:27:06 +00:00
Chris Lattner
eea485de1f Keep track of the retval type as well.
llvm-svn: 19670
2005-01-18 19:26:36 +00:00
Chris Lattner
ff086f3016 Teach legalize to promote copy(from|to)reg, instead of making the isel pass
do it.  This results in better code on X86 for floats (because if strict
precision is not required, we can elide some more expensive double -> float
conversions like the old isel did), and allows other targets to emit
CopyFromRegs that are not legal for arguments.

llvm-svn: 19668
2005-01-18 17:54:55 +00:00
Chris Lattner
891aa537f7 Teach legalize to promote SetCC results.
llvm-svn: 19657
2005-01-18 02:59:52 +00:00
Chris Lattner
95307053ec Allow setcc operations to have nonbool types.
llvm-svn: 19656
2005-01-18 02:52:03 +00:00
Chris Lattner
906541da95 Fix the completely broken FP constant folds for setcc's.
llvm-svn: 19651
2005-01-18 02:11:55 +00:00
Chris Lattner
c0aca0d13c Non-volatile loads can be freely reordered against each other. This fixes
X86/reg-pressure.ll again, and allows us to do nice things in other cases.
For example, we now codegen this sort of thing:

int %loadload(int *%X, int* %Y) {
  %Z = load int* %Y
  %Y = load int* %X      ;; load between %Z and store
  %Q = add int %Z, 1
  store int %Q, int* %Y
  ret int %Y
}

Into this:

loadload:
        mov %EAX, DWORD PTR [%ESP + 4]
        mov %EAX, DWORD PTR [%EAX]
        mov %ECX, DWORD PTR [%ESP + 8]
        inc DWORD PTR [%ECX]
        ret

where we weren't able to form the 'inc [mem]' before.  This also lets the
instruction selector emit loads in any order it wants to, which can be good
for register pressure as well.

llvm-svn: 19644
2005-01-17 22:19:26 +00:00
Chris Lattner
49291c4d96 Don't call SelectionDAG.getRoot() directly, go through a forwarding method.
llvm-svn: 19642
2005-01-17 19:43:36 +00:00
Chris Lattner
88bbcfc893 Implement a target independent optimization to codegen arguments only into
the basic block that uses them if possible.  This is a big win on X86, as it
lets us fold the argument loads into instructions and reduce register pressure
(by not loading all of the arguments in the entry block).

For this (contrived to show the optimization) testcase:

int %argtest(int %A, int %B) {
        %X = sub int 12345, %A
        br label %L
L:
        %Y = add int %X, %B
        ret int %Y
}

we used to produce:

argtest:
        mov %ECX, DWORD PTR [%ESP + 4]
        mov %EAX, 12345
        sub %EAX, %ECX
        mov %EDX, DWORD PTR [%ESP + 8]
.LBBargtest_1:  # L
        add %EAX, %EDX
        ret


now we produce:

argtest:
        mov %EAX, 12345
        sub %EAX, DWORD PTR [%ESP + 4]
.LBBargtest_1:  # L
        add %EAX, DWORD PTR [%ESP + 8]
        ret

This also fixes the FIXME in the code.

BTW, this occurs in real code.  164.gzip shrinks from 8623 to 8608 lines of
.s file.  The stack frame in huft_build shrinks from 1644->1628 bytes,
inflate_codes shrinks from 116->108 bytes, and inflate_block from 2620->2612,
due to fewer spills.

Take that alkis. :-)

llvm-svn: 19639
2005-01-17 17:55:19 +00:00
Chris Lattner
49a1f3a109 Refactor code into a new method.
llvm-svn: 19635
2005-01-17 17:15:02 +00:00
Chris Lattner
ec55e3e529 Implement legalize of call nodes.
llvm-svn: 19617
2005-01-16 19:46:48 +00:00
Chris Lattner
0eca430af1 Revamp supported ops. Instead of just being supported or not, we now keep
track of how to deal with them, and provide the target with a hook that it
can use to legalize arbitrary operations in arbitrary ways.

Implement custom lowering for a couple of ops, implement promotion for select
operations (which x86 needs).

llvm-svn: 19613
2005-01-16 07:29:19 +00:00
Chris Lattner
835a5efef3 add method stub
llvm-svn: 19612
2005-01-16 07:28:41 +00:00
Chris Lattner
907534af24 Don't mash stuff together.
llvm-svn: 19611
2005-01-16 07:28:31 +00:00
Chris Lattner
0f4f239899 Implement some more missing promotions.
llvm-svn: 19606
2005-01-16 05:06:12 +00:00
Chris Lattner
742b77f9af Clarify assertion.
llvm-svn: 19597
2005-01-16 02:23:34 +00:00
Chris Lattner
4517b8af97 Add assertions.
llvm-svn: 19596
2005-01-16 02:23:22 +00:00
Chris Lattner
9f8589f4b3 Add support for promoted registers being live across blocks.
llvm-svn: 19595
2005-01-16 02:23:07 +00:00
Chris Lattner
01e2ce8a4c Move some information into the TargetLowering object.
llvm-svn: 19583
2005-01-16 01:11:45 +00:00
Chris Lattner
9762070e50 Use the new TLI method to get this.
llvm-svn: 19582
2005-01-16 01:11:19 +00:00
Chris Lattner
0777f84d53 legalize a bunch of operations that I missed.
llvm-svn: 19580
2005-01-16 00:38:00 +00:00
Chris Lattner
1de18d422e Add support for targets that require promotions.
llvm-svn: 19579
2005-01-16 00:37:38 +00:00
Chris Lattner
8c4c81d6b3 Fix some serious bugs in promotion.
llvm-svn: 19578
2005-01-16 00:17:42 +00:00
Chris Lattner
9785def2cd Eliminate unneeded extensions.
llvm-svn: 19577
2005-01-16 00:17:20 +00:00
Chris Lattner
df02c93d90 Implement promotion of a whole bunch more operators. I think that this is
basically everything.

llvm-svn: 19576
2005-01-15 22:16:26 +00:00
Chris Lattner
f3fd0c6a93 Print extra type for nodes with extra type info.
llvm-svn: 19575
2005-01-15 21:11:37 +00:00
Chris Lattner
1ab9009270 Add support for legalizing FP_ROUND_INREG, SIGN_EXTEND_INREG, and
ZERO_EXTEND_INREG for targets that don't support them.

llvm-svn: 19573
2005-01-15 07:15:18 +00:00
Chris Lattner
191ac9c589 Common code factored out.
llvm-svn: 19572
2005-01-15 07:14:32 +00:00
Chris Lattner
3b20db54f3 implement these methods.
llvm-svn: 19571
2005-01-15 06:52:40 +00:00
Chris Lattner
fdd07b4092 Add support for promoting ADD/MUL.
Add support for new SIGN_EXTEND_INREG, ZERO_EXTEND_INREG, and FP_ROUND_INREG operators.
Realize that if we do any promotions, we need to iterate SelectionDAG
construction.

llvm-svn: 19569
2005-01-15 06:18:18 +00:00
Chris Lattner
2f65e8798f Add new SIGN_EXTEND_INREG, ZERO_EXTEND_INREG, and FP_ROUND_INREG operators.
llvm-svn: 19568
2005-01-15 06:17:04 +00:00
Chris Lattner
94b8a3e50c Add initial support for promoting some operators.
llvm-svn: 19565
2005-01-15 05:21:40 +00:00
Chris Lattner
2dfbc4fddd Adjust to CopyFromReg changes, implement deletion of truncating/extending
stores/loads.

llvm-svn: 19562
2005-01-14 22:38:01 +00:00
Chris Lattner
0974002024 Start implementing truncating stores and extending loads.
llvm-svn: 19559
2005-01-14 22:08:15 +00:00
Chris Lattner
2087f3c8e9 Improve compatibility with acc
llvm-svn: 19549
2005-01-14 15:54:24 +00:00
Chris Lattner
7a8788c9ac Add new ImplicitDef node, rename CopyRegSDNode class to RegSDNode.
llvm-svn: 19535
2005-01-13 20:50:02 +00:00
Chris Lattner
9cc534f2dc Don't forget the existing root.
llvm-svn: 19531
2005-01-13 19:53:14 +00:00
Chris Lattner
160fdb384b Codegen independent ops as being independent.
llvm-svn: 19528
2005-01-13 17:59:43 +00:00
Chris Lattner
37a5de6eb0 Legalize new node, add assertion.
llvm-svn: 19527
2005-01-13 17:59:25 +00:00
Chris Lattner
86b19c5605 Print new node.
llvm-svn: 19526
2005-01-13 17:59:10 +00:00
Chris Lattner
93cb0148f8 Do not fold (zero_ext (sign_ext V)) -> (sign_ext V), they are not the same.
This fixes llvm-test/SingleSource/Regression/C/casts.c

llvm-svn: 19519
2005-01-12 18:51:15 +00:00
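
A concrete counterexample for the non-fold above: take V = -1 as an i8, sign-extend it to i16, then zero-extend to i32; that yields 0x0000FFFF, while a direct sign extension of V to i32 yields 0xFFFFFFFF. In C++ terms:

    #include <cstdint>

    // zext32(sext16(V)) versus sext32(V) for the i8 value V = -1.
    static_assert((uint32_t)(uint16_t)(int16_t)(int8_t)-1 == 0x0000FFFFu,
                  "zero_ext of the sign_ext clears the top 16 bits again");
    static_assert((uint32_t)(int32_t)(int8_t)-1 == 0xFFFFFFFFu,
                  "a plain sign_ext sets all 32 bits");

    int main() { return 0; }
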
Chris Lattner
e97b0e1358 New method
llvm-svn: 19517
2005-01-12 18:37:47 +00:00
Chris Lattner
1b3b24f116 Fix sign extend to long. When coming from sbyte, we used to generate:
movsbl 4(%esp), %eax
        movl %eax, %edx
        sarl $7, %edx

Now we generate:

        movsbl 4(%esp), %eax
        movl %eax, %edx
        sarl $31, %edx

Which is right.

llvm-svn: 19515
2005-01-12 18:19:52 +00:00
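
The reason sarl $31 is the right shift amount: once the source has been sign-extended into a full 32-bit register, the high word of the 64-bit result is that register arithmetically shifted right by 31, so every bit becomes a copy of the sign bit. A shift by 7 only happens to coincide with that for values that already fit in a signed byte. Illustrative check (two's complement arithmetic shifts assumed):

    #include <cassert>
    #include <cstdint>

    // High 32 bits of sign-extending a 32-bit value to 64 bits.
    static int32_t highWordOfSext(int32_t x) {
      return (int32_t)(((int64_t)x) >> 32);
    }

    int main() {
      const int32_t vals[] = {-70000, -128, -1, 0, 127, 300};
      for (int32_t x : vals)
        assert(highWordOfSext(x) == (x >> 31));       // sarl $31 is always correct
      assert((300 >> 7) != highWordOfSext(300));      // sarl $7 is not, in general
      return 0;
    }
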
Reid Spencer
c8c50250a1 Shut up warnings with GCC 3.4.3 about uninitialized variables.
llvm-svn: 19512
2005-01-12 14:53:45 +00:00
Chris Lattner
e7945a2e2e Add an option to view the selection dags as they are generated.
llvm-svn: 19498
2005-01-12 03:41:21 +00:00
Chris Lattner
74fcfd5148 Print the value types in the nodes of the graph
llvm-svn: 19485
2005-01-11 22:21:04 +00:00
Chris Lattner
f588cdd51e add an assertion, avoid creating copyfromreg/copytoreg pairs that are the
same for PHI nodes.

llvm-svn: 19484
2005-01-11 22:03:46 +00:00
Chris Lattner
8de5a27681 Squelch optimized warning.
llvm-svn: 19475
2005-01-11 17:46:49 +00:00
Chris Lattner
963af6652b Teach legalize to lower MEMSET/MEMCPY/MEMMOVE operations if the target
does not support them.

llvm-svn: 19465
2005-01-11 05:57:22 +00:00
Chris Lattner
6b9082114f Print new operations.
llvm-svn: 19464
2005-01-11 05:57:01 +00:00
Chris Lattner
7cde8a2658 Turn memset/memcpy/memmove into the corresponding operations.
llvm-svn: 19463
2005-01-11 05:56:49 +00:00
Chris Lattner
2eacd11a86 shift X, 0 -> X
llvm-svn: 19453
2005-01-11 04:25:13 +00:00
Chris Lattner
07a3ade230 Print SelectionDAGs bottom up, include extra info in the node labels
llvm-svn: 19447
2005-01-11 00:34:33 +00:00
Chris Lattner
1c273d3a14 Add a marker for the graph root.
llvm-svn: 19445
2005-01-10 23:52:04 +00:00
Chris Lattner
daa052a97e Put the operation name in each node, put the function name on the graph.
llvm-svn: 19444
2005-01-10 23:26:00 +00:00
Chris Lattner
0307506841 Split out SDNode::getOperationName into its own method.
llvm-svn: 19443
2005-01-10 23:25:25 +00:00
Chris Lattner
8c13447254 Implement initial selectiondag printing support. This gets us a nice
graph with no labels! :)

llvm-svn: 19441
2005-01-10 23:08:40 +00:00
Chris Lattner
5433d8de29 Lower to the correct functions. This fixes FreeBench/fourinarow
llvm-svn: 19436
2005-01-10 21:02:37 +00:00
Chris Lattner
02236df007 Implement a couple of more simplifications. This lets us codegen:
int test2(int * P, int* Q, int A, int B) {
        return P+A == P;
}

into:

test2:
        movl 4(%esp), %eax
        movl 12(%esp), %eax
        shll $2, %eax
        cmpl $0, %eax
        sete %al
        movzbl %al, %eax
        ret

instead of:

test2:
        movl 4(%esp), %eax
        movl 12(%esp), %ecx
        leal (%eax,%ecx,4), %ecx
        cmpl %eax, %ecx
        sete %al
        movzbl %al, %eax
        ret

ICC is producing worse code:

test2:
        movl      4(%esp), %eax                                 #8.5
        movl      12(%esp), %edx                                #8.5
        lea       (%edx,%edx), %ecx                             #9.9
        addl      %ecx, %ecx                                    #9.9
        addl      %eax, %ecx                                    #9.9
        cmpl      %eax, %ecx                                    #9.16
        movl      $0, %eax                                      #9.16
        sete      %al                                           #9.16
        ret                                                     #9.16

as is GCC (looks like our old code):

test2:
        movl    4(%esp), %edx
        movl    12(%esp), %eax
        leal    (%edx,%eax,4), %ecx
        cmpl    %edx, %ecx
        sete    %al
        movzbl  %al, %eax
        ret

llvm-svn: 19430
2005-01-10 02:03:02 +00:00
Chris Lattner
8d09b03ed1 Fix incorrect constant folds, fixing Stepanov after the SHR patch.
llvm-svn: 19429
2005-01-10 01:16:03 +00:00
Chris Lattner
9d479d4a34 Constant fold shifts, turning this loop:
.LBB_Z5test0PdS__3:     # no_exit.1
        fldl data(,%eax,8)
        fldl 24(%esp)
        faddp %st(1)
        fstl 24(%esp)
        incl %eax
        movl $16000, %ecx
        sarl $3, %ecx
        cmpl %eax, %ecx
        fstpl 16(%esp)
        #FP_REG_KILL
        jg .LBB_Z5test0PdS__3   # no_exit.1

into:

.LBB_Z5test0PdS__3:     # no_exit.1
        fldl data(,%eax,8)
        fldl 24(%esp)
        faddp %st(1)
        fstl 24(%esp)
        incl %eax
        cmpl $2000, %eax
        fstpl 16(%esp)
        #FP_REG_KILL
        jl .LBB_Z5test0PdS__3   # no_exit.1

llvm-svn: 19427
2005-01-10 00:07:15 +00:00
Chris Lattner
59d7066da8 Add some folds for == and != comparisons. This allows us to
codegen this loop in stepanov:

no_exit.i:              ; preds = %entry, %no_exit.i, %then.i, %_Z5checkd.exit
        %i.0.0 = phi int [ 0, %entry ], [ %i.0.0, %no_exit.i ], [ %inc.0, %_Z5checkd.exit ], [ %inc.012, %then.i ]              ; <int> [#uses=3]
        %indvar = phi uint [ %indvar.next, %no_exit.i ], [ 0, %entry ], [ 0, %then.i ], [ 0, %_Z5checkd.exit ]          ; <uint> [#uses=3]
        %result_addr.i.0 = phi double [ %tmp.4.i.i, %no_exit.i ], [ 0.000000e+00, %entry ], [ 0.000000e+00, %then.i ], [ 0.000000e+00, %_Z5checkd.exit ]          ; <double> [#uses=1]
        %first_addr.0.i.2.rec = cast uint %indvar to int                ; <int> [#uses=1]
        %first_addr.0.i.2 = getelementptr [2000 x double]* %data, int 0, uint %indvar           ; <double*> [#uses=1]
        %inc.i.rec = add int %first_addr.0.i.2.rec, 1           ; <int> [#uses=1]
        %inc.i = getelementptr [2000 x double]* %data, int 0, int %inc.i.rec            ; <double*> [#uses=1]
        %tmp.3.i.i = load double* %first_addr.0.i.2             ; <double> [#uses=1]
        %tmp.4.i.i = add double %result_addr.i.0, %tmp.3.i.i            ; <double> [#uses=2]
        %tmp.2.i = seteq double* %inc.i, getelementptr ([2000 x double]* %data, int 0, int 2000)                ; <bool> [#uses=1]
        %indvar.next = add uint %indvar, 1              ; <uint> [#uses=1]
        br bool %tmp.2.i, label %_Z10accumulateIPddET0_T_S2_S1_.exit, label %no_exit.i

To this:

.LBB_Z4testIPddEvT_S1_T0__1:    # no_exit.i
        fldl data(,%eax,8)
        fldl 16(%esp)
        faddp %st(1)
        fstpl 16(%esp)
        incl %eax
        movl %eax, %ecx
        shll $3, %ecx
        cmpl $16000, %ecx
        #FP_REG_KILL
        jne .LBB_Z4testIPddEvT_S1_T0__1 # no_exit.i

instead of this:

.LBB_Z4testIPddEvT_S1_T0__1:    # no_exit.i
        fldl data(,%eax,8)
        fldl 16(%esp)
        faddp %st(1)
        fstpl 16(%esp)
        incl %eax
        leal data(,%eax,8), %ecx
        leal data+16000, %edx
        cmpl %edx, %ecx
        #FP_REG_KILL
        jne .LBB_Z4testIPddEvT_S1_T0__1 # no_exit.i

llvm-svn: 19425
2005-01-09 20:52:51 +00:00
Jeff Cohen
91dd6d2d20 Fix VC++ compilation error
llvm-svn: 19423
2005-01-09 20:41:56 +00:00
Chris Lattner
fa06762d0e Print the DAG out more like a DAG in nested format.
llvm-svn: 19422
2005-01-09 20:38:33 +00:00
Chris Lattner
e3b9f22967 Print out nodes sorted by their address to make it easier to find them in a list.
llvm-svn: 19421
2005-01-09 20:26:36 +00:00
Chris Lattner
82caa0dc2e Add a simple transformation. This allows us to compile one of the inner
loops in stepanov to this:

.LBB_Z5test0PdS__2:     # no_exit.1
        fldl data(,%eax,8)
        fldl 24(%esp)
        faddp %st(1)
        fstl 24(%esp)
        incl %eax
        cmpl $2000, %eax
        fstpl 16(%esp)
        #FP_REG_KILL
        jl .LBB_Z5test0PdS__2

instead of this:

.LBB_Z5test0PdS__2:     # no_exit.1
        fldl data(,%eax,8)
        fldl 24(%esp)
        faddp %st(1)
        fstl 24(%esp)
        incl %eax
        movl $data, %ecx
        movl %ecx, %edx
        addl $16000, %edx
        subl %ecx, %edx
        movl %edx, %ecx
        sarl $2, %ecx
        shrl $29, %ecx
        addl %ecx, %edx
        sarl $3, %edx
        cmpl %edx, %eax
        fstpl 16(%esp)
        #FP_REG_KILL
        jl .LBB_Z5test0PdS__2

The old instruction selector produced:

.LBB_Z5test0PdS__2:     # no_exit.1
        fldl 24(%esp)
        faddl data(,%eax,8)
        fstl 24(%esp)
        movl %eax, %ecx
        incl %ecx
        incl %eax
        leal data+16000, %edx
        movl $data, %edi
        subl %edi, %edx
        movl %edx, %edi
        sarl $2, %edi
        shrl $29, %edi
        addl %edi, %edx
        sarl $3, %edx
        cmpl %edx, %ecx
        fstpl 16(%esp)
        #FP_REG_KILL
        jl .LBB_Z5test0PdS__2   # no_exit.1

Which is even worse!

llvm-svn: 19419
2005-01-09 20:09:57 +00:00
Chris Lattner
d674d08230 Fix a bug legalizing call instructions (make sure to remember all result
values), and eliminate some switch statements.

llvm-svn: 19417
2005-01-09 19:43:23 +00:00
Chris Lattner
ac23355362 Fix a minor bug legalizing dynamic_stackalloc. This allows us to compile
std::__pad<wchar_t, std::char_traits<wchar_t> >::_S_pad(std::ios_base&, wchar_t, wchar_t*, wchar_t const*, int, int, bool)

from libstdc++

llvm-svn: 19416
2005-01-09 19:07:54 +00:00
Chris Lattner
b3e31c6def Teach legalize to deal with DYNAMIC_STACKALLOC (aka a dynamic llvm alloca)
llvm-svn: 19415
2005-01-09 19:03:49 +00:00
Chris Lattner
cc18c057cf Handle static alloca arguments to PHI nodes.
llvm-svn: 19409
2005-01-09 01:16:24 +00:00
Chris Lattner
3454e31bba Use new interfaces to correctly lower varargs and return/frame address intrinsics.
llvm-svn: 19407
2005-01-09 00:00:49 +00:00
Chris Lattner
aad3ca491d Add support for llvm.setjmp and longjmp. Only 3 SingleSource/UnitTests fail now.
llvm-svn: 19404
2005-01-08 22:48:57 +00:00
Chris Lattner
3b52b2f6c2 Tighten up assertions.
llvm-svn: 19397
2005-01-08 20:35:13 +00:00
Chris Lattner
c23687789e Silence VS warnings
llvm-svn: 19388
2005-01-08 19:59:10 +00:00
Chris Lattner
2dafaac5d1 Silence warnings from VS
llvm-svn: 19386
2005-01-08 19:55:00 +00:00
Chris Lattner
104064bf2c Silence VS warnings
llvm-svn: 19385
2005-01-08 19:53:50 +00:00
Chris Lattner
a58b3f48ef Silence VS warnings.
llvm-svn: 19384
2005-01-08 19:52:31 +00:00
Chris Lattner
38545e9952 Implement handling of most long operators through libcalls.
Fix a bug legalizing "ret (Val,Val)"

llvm-svn: 19375
2005-01-08 19:27:05 +00:00
Chris Lattner
60ef22ce82 Adjust to changes in LowerCallTo interfaces
llvm-svn: 19374
2005-01-08 19:26:18 +00:00
Chris Lattner
fd84495692 Add support for FP->INT conversions and back.
llvm-svn: 19369
2005-01-08 08:08:56 +00:00
Chris Lattner
e759d984cf Implement the 'store FPIMM, Ptr' -> 'store INTIMM, Ptr' optimization for
all targets.

llvm-svn: 19366
2005-01-08 06:25:56 +00:00
Chris Lattner
e32ab4bd47 1ULL << 64 is undefined, don't do it.
llvm-svn: 19365
2005-01-08 06:24:30 +00:00
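
Shifting a 64-bit value by 64 is undefined in C/C++, and the hardware does not agree on a result either: x86 masks the count, so the shift would act as a shift by 0 and give 1 rather than 0. The legal neighbouring cases behave as expected (the undefined one is deliberately not executed here):

    #include <cassert>

    int main() {
      assert((1ULL << 63) == 0x8000000000000000ULL);  // largest well-defined shift
      unsigned long long wide = 1;
      for (int i = 0; i < 64; ++i)
        wide <<= 1;                                   // 64 single-bit shifts wrap to 0
      assert(wide == 0);                              // but 1ULL << 64 is NOT defined to be 0
      return 0;
    }
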
Chris Lattner
717236fcd3 Fix a pointer invalidation problem. This fixes Generic/badarg6.ll
llvm-svn: 19361
2005-01-07 23:32:00 +00:00
Chris Lattner
53173ba1d1 Fold conditional branches on constants away.
llvm-svn: 19360
2005-01-07 22:49:57 +00:00
Chris Lattner
8f55fae569 Fix a thinko in the reassociation code, fixing Generic/badlive.ll
llvm-svn: 19359
2005-01-07 22:44:09 +00:00
Chris Lattner
6f461f406e Add support for truncating integer casts from long.
llvm-svn: 19358
2005-01-07 22:37:48 +00:00
Chris Lattner
79ca9cdb7e Fix a bug in load expansion legalization and ret legalization. This fixes
CodeGen/Generic/select.ll:castconst.

llvm-svn: 19357
2005-01-07 22:28:47 +00:00
Chris Lattner
a834e96647 Legalize unconditional branches too
llvm-svn: 19356
2005-01-07 22:12:08 +00:00
Chris Lattner
3f2ce91a99 Implement support for long GEP indices on 32-bit archs and support for
int GEP indices on 64-bit archs.

llvm-svn: 19354
2005-01-07 21:56:57 +00:00
Chris Lattner
191554c09f Simplify: truncate ({zero|sign}_extend (X))
llvm-svn: 19353
2005-01-07 21:56:24 +00:00
Chris Lattner
60e3842843 implement legalization of a bunch more operators.
llvm-svn: 19352
2005-01-07 21:45:56 +00:00
Chris Lattner
8c6c12da86 Fix another bug legalizing calls!
llvm-svn: 19350
2005-01-07 21:35:32 +00:00
Chris Lattner
86601673d6 Fix handling of dead PHI nodes.
llvm-svn: 19349
2005-01-07 21:34:19 +00:00
Chris Lattner
d671aa053c Fix a bug legalizing calls
llvm-svn: 19348
2005-01-07 21:34:13 +00:00
Chris Lattner
3871313761 After legalizing a DAG, delete dead nodes to save space.
llvm-svn: 19346
2005-01-07 21:09:37 +00:00
Chris Lattner
16faa6501a Implement RemoveDeadNodes
llvm-svn: 19345
2005-01-07 21:09:16 +00:00
Chris Lattner
39baa91b9a Teach legalize how to handle condbranches
llvm-svn: 19339
2005-01-07 08:19:42 +00:00
Chris Lattner
74f8f6f657 Initial implementation of the SelectionDAGISel class. This contains most
of the code for lowering from LLVM code to a SelectionDAG.

llvm-svn: 19331
2005-01-07 07:47:53 +00:00
Chris Lattner
89f2ccbe9c This file is obsolete
llvm-svn: 19330
2005-01-07 07:47:23 +00:00
Chris Lattner
fd473edcd8 Initial implementation of the DAG legalization. This still has a long way
to go, but it does work for some non-trivial cases now.

llvm-svn: 19329
2005-01-07 07:47:09 +00:00
Chris Lattner
c72669973a Complete rewrite of the SelectionDAG class.
llvm-svn: 19327
2005-01-07 07:46:32 +00:00
Chris Lattner
c1feb0c8fe Make the 2-address instruction lowering pass smarter in two ways:
1. If we are two-addressing a commutable instruction and the LHS is not the
   last use of the variable, see if the instruction is the last use of the
   RHS.  If so, commute the instruction, allowing us to avoid a
   register-register copy in many cases for common instructions like ADD, OR,
   AND, etc on X86.
2. If #1 doesn't hold, and if this is an instruction that also exists in
   3-address form, promote the instruction to a 3-address instruction to
   avoid the register-register copy.  We can do this for several common
   instructions in X86, including ADDrr, INC, DEC, etc.

This patch implements test/Regression/CodeGen/X86/commute-two-addr.ll,
overlap-add.ll, and overlap-shift.ll when I check in the X86 support for it.

llvm-svn: 19245
2005-01-02 02:34:12 +00:00
Chris Lattner
558a640b3c Move virtual method call out of loop
llvm-svn: 18955
2004-12-15 07:04:32 +00:00
Nate Begeman
85a2e38a56 Move virtual function call out of loop to speed up getFreePhysReg by about
20%, shaving 0.1s off hbd compile time on my g5.  Yay.

llvm-svn: 18592
2004-12-07 05:25:53 +00:00
Reid Spencer
d50c86f078 For PR387:
Make only one print method to avoid overloaded virtual warnings when
compiled with -Woverloaded-virtual

llvm-svn: 18589
2004-12-07 04:03:45 +00:00
Chris Lattner
9245823389 Prevent accessing past the end of the intervals vector; this fixes
Prolang-C/bison in the JIT

llvm-svn: 18477
2004-12-04 01:22:09 +00:00
Chris Lattner
c3750aa2a3 Fix SingleSource/UnitTests/2004-11-28-GlobalBoolLayout.c, and hopefully
PR449

llvm-svn: 18306
2004-11-28 17:56:47 +00:00
Chris Lattner
70c19defde Fix the FIXME, nuke the JIT specific forceCompilationOf method.
llvm-svn: 18131
2004-11-22 21:54:35 +00:00
Chris Lattner
d38029313a These methods are obsolete
llvm-svn: 18129
2004-11-22 21:48:33 +00:00
Chris Lattner
37fc0d8b95 Adjust to changed interfaces
llvm-svn: 18064
2004-11-20 23:53:26 +00:00
Chris Lattner
c0599d0f14 Add getCurrentPCOffset() and addRelocation() methods.
llvm-svn: 18034
2004-11-20 03:44:39 +00:00
Chris Lattner
2978400c23 Match change in MachineCodeEmitter prototype.
llvm-svn: 18009
2004-11-19 20:56:46 +00:00
Chris Lattner
ccd7bfb561 * There is no reason for SpillWeights to be an instance var
* Do not put fixed registers into the unhandled set.  This means they will
  never find their way into the inactive, active, or handled sets, so we
  can simplify a bunch of code.

llvm-svn: 17945
2004-11-18 06:01:45 +00:00
Chris Lattner
60c90d623f There is no need to check to see if j overflowed in this loop as we're only
incrementing i.

llvm-svn: 17944
2004-11-18 05:28:21 +00:00
Chris Lattner
2c16205a0d Moderate head scratching reveals that this conditional is not needed. If
i->start == j->start, then certainly i->end > j->start.

llvm-svn: 17943
2004-11-18 05:19:02 +00:00
Chris Lattner
e9ab36314d Fix a couple of bugs where we considered physregs past their range as possibly
intersecting an interval.

llvm-svn: 17939
2004-11-18 04:33:31 +00:00
Chris Lattner
c23cebb206 Fix typo
llvm-svn: 17938
2004-11-18 04:31:10 +00:00
Chris Lattner
3a6991f745 Start using the iterators in the fixed_ intervals to avoid having to binary
search physreg intervals every time we access it.  This takes another
half second off of linscan.

llvm-svn: 17937
2004-11-18 04:13:02 +00:00
Chris Lattner
189acb3955 Take another .7 seconds off of linear scan time.
llvm-svn: 17936
2004-11-18 04:02:11 +00:00
Chris Lattner
a52650a18a Add a counter for the number of times linscan has to backtrack. Start using
the iterator hints we have to speed up overlaps().  This speeds linscan up
by about .2s (out of 8.7) on 175.vpr for PPC.

llvm-svn: 17935
2004-11-18 03:49:30 +00:00
Chris Lattner
2edc3cec62 Add ability to give hints to the overlaps routines.
llvm-svn: 17934
2004-11-18 03:47:34 +00:00
Chris Lattner
18ced80110 * Improve comments/documentation substantially
* Eliminate the releaseMemory method, this is not an analysis
* Change the fixed, active, and inactive lists of intervals to maintain an
  iterator for the current position in the interval.  This allows us to do
  constant time increments of the iterator instead of having to do a binary
  search to find our liverange in our liveinterval all of the time, which
  substantially speeds up cases where LiveIntervals have many LiveRanges
  - which is very common for physical registers.  On targets with many
physregs, this can make a noticeable difference.

  With a release build of LLC for PPC, this halves the time in
  processInactiveIntervals and processActiveIntervals, from 1.5s to .75s.

  This also lays the ground for more to come.

llvm-svn: 17933
2004-11-18 02:42:27 +00:00
Chris Lattner
3bf87c8f95 Add new advanceTo method
llvm-svn: 17932
2004-11-18 02:37:31 +00:00
Chris Lattner
77a4102d91 Fix a minor bug in expiredAt. endNumber() is the first number that is not valid.
llvm-svn: 17931
2004-11-18 01:34:44 +00:00
Chris Lattner
ddc898639f Rename some methods, use 'begin' instead of 'start', add new LiveInterval
iterator/begin/end members.

llvm-svn: 17930
2004-11-18 01:29:39 +00:00
Brian Gaeke
23b56332bc Give a better message for a common assertion failure.
llvm-svn: 17887
2004-11-16 06:52:35 +00:00
Chris Lattner
bde92f3c03 Do not make i have bigger scope than we need
llvm-svn: 17483
2004-11-05 04:47:37 +00:00
Reid Spencer
d3f7233495 Change Library Names Not To Conflict With Others When Installed
llvm-svn: 17286
2004-10-27 23:18:45 +00:00
Chris Lattner
2c73917686 Move method bodies that depend on <algorithm> from MBB.h to MBB.cpp
llvm-svn: 17253
2004-10-26 15:43:42 +00:00
Chris Lattner
a361504a90 Clean up the MachineBasicBlock.h file, percolating #includes into this file.
Patch contributed by Morten Ofstad

llvm-svn: 17251
2004-10-26 15:35:58 +00:00
Chris Lattner
6e775d56cf Reduce usage of MRegisterInfo::getRegClass
llvm-svn: 17238
2004-10-26 05:29:18 +00:00
Chris Lattner
84b07af401 Do not use variable sized arrays in C++, they are non-portable. Patch
contributed by Morten Ofstad

llvm-svn: 17217
2004-10-25 18:44:14 +00:00
Chris Lattner
ec942219ad Patch to support MSVC better, contributed by Morten Ofstad
llvm-svn: 17215
2004-10-25 18:40:47 +00:00
Reid Spencer
e48ba34fd4 We won't use automake
llvm-svn: 17155
2004-10-22 03:35:04 +00:00
Reid Spencer
ce514b1c2c Initial automake generated Makefile template
llvm-svn: 17136
2004-10-18 23:55:41 +00:00
Chris Lattner
8d479b62ad Add support for undef
llvm-svn: 17055
2004-10-16 18:19:26 +00:00
Chris Lattner
65976f4178 Allow machine operands to represent global variables with offsets. This is
useful when you have a reference like:

int A[100];

void foo() { A[10] = 1; }

In this case, &A[10] is a single constant and should be treated as such.

Only MO_GlobalAddress and MO_ExternalSymbol are allowed to use this field, no
other operand type is.

This is another fine patch contributed by Jeff Cohen!!

llvm-svn: 17007
2004-10-15 04:38:41 +00:00
Chris Lattner
34acee9dbd This patch fixes the nasty bug that caused 175.vpr to fail for X86 last night.
The problem occurred when trying to reload this instruction:

MOV32mr %reg2326, 8, %reg2297, 4, %reg2295

The value of reg2326 was available in EBX, so it was reused from there, instead
of reloading it into EDX.

The value of reg2297 was available in EDX, so it was reused from there, instead
of reloading it into EDI.

The value of reg2295 was not available, so we tried reloading it into EBX, its
assigned register.  However, we checked and saw that we already reloaded
something into EBX, so we chose what reg2326 was assigned to (EDX) and reloaded
into that register instead.

Unfortunately EDX had already been used by reg2297, so reloading into EDX
clobbered the value used by the reg2326 operand, breaking the program.

The fix for this is to check that the newly picked register is ok.  In this
case we now find that EDX is already used and try using EDI, which succeeds.

llvm-svn: 17006
2004-10-15 03:19:31 +00:00
Chris Lattner
2c87b68231 This patch adds and improves debugging output. No functionality changes.
llvm-svn: 17005
2004-10-15 03:16:29 +00:00
Reid Spencer
e6418ec30f Update to reflect changes in Makefile rules.
llvm-svn: 16950
2004-10-13 11:46:52 +00:00
Misha Brukman
5e9e7cada2 ModuloScheduling has moved to lib/Target/SparcV9
llvm-svn: 16906
2004-10-10 23:37:40 +00:00
Misha Brukman
020c3ab94c ModuloScheduling moved to lib/Target/SparcV9 as it is SparcV9-specific
llvm-svn: 16902
2004-10-10 23:33:20 +00:00
Tanya Lattner
be65f6cd02 Added debug information. Fixed several bugs in the reconstruct loop function.
llvm-svn: 16895
2004-10-10 22:44:35 +00:00
Reid Spencer
b317fd443c Remove the InstrSched directory (moved to SparcV9)
llvm-svn: 16887
2004-10-10 21:19:41 +00:00
Reid Spencer
064bebfba3 Directory no longer exists (moved to Targets/SparcV9).
llvm-svn: 16886
2004-10-10 21:18:31 +00:00
Reid Spencer
7d9cba7a0f Initial version of automake Makefile.am file.
llvm-svn: 16885
2004-10-10 20:43:57 +00:00
Misha Brukman
af84f00600 Hyphenate target-(in)dependent for more tasty grammar goodness (tm)
llvm-svn: 16854
2004-10-08 19:43:31 +00:00
Misha Brukman
a01e5cd2ea InstrSched has been moved to lib/Target/SparcV9
llvm-svn: 16850
2004-10-08 18:12:53 +00:00
Misha Brukman
049f559995 InstrSched is SparcV9-specific and so has been moved to lib/Target/SparcV9/
llvm-svn: 16849
2004-10-08 18:12:14 +00:00
Misha Brukman
5ea7613a13 Single-space instead of double-spacing in the Makefile
llvm-svn: 16845
2004-10-08 18:05:25 +00:00
Chris Lattner
815b635639 Do not repeat the map lookup
llvm-svn: 16633
2004-10-01 23:16:43 +00:00
Chris Lattner
c521544a32 When a virtual register is folded into an instruction, keep track of whether
it was a use, def, or both.  This allows us to be less pessimistic in our
analysis of them.  In practice, this doesn't make a big difference, but it
doesn't hurt either.

llvm-svn: 16632
2004-10-01 23:15:36 +00:00
Chris Lattner
38467b8a66 Add a simple little improvement to the local spiller to keep track of stores
and delete them if they turn out to be dead.  This is a useful little hack
that even speeds up some programs.  For example, it speeds up Ptrdist/ks
from 17.53s to 15.59s, and 188.ammp from 149s to 146s.

This also speeds up llc :)

llvm-svn: 16630
2004-10-01 19:47:12 +00:00
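A minimal sketch of the bookkeeping this describes, with illustrative names:
remember the last store into each stack slot, and treat that store as dead if
the slot is overwritten before anything reads it.

#include <cstddef>
#include <map>

struct DeadStoreTracker {
  std::map<int, std::size_t> lastStore;  // stack slot -> index of last store

  // Returns true if a previous store to this slot is now known to be dead.
  bool noteStore(int slot, std::size_t instrIndex, std::size_t &deadIndex) {
    bool hadDead = false;
    std::map<int, std::size_t>::iterator it = lastStore.find(slot);
    if (it != lastStore.end()) {
      deadIndex = it->second;   // nothing read the slot since this store
      hadDead = true;
    }
    lastStore[slot] = instrIndex;
    return hadDead;
  }

  // A load makes the pending store live, so stop tracking it.
  void noteLoad(int slot) { lastStore.erase(slot); }
};
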
Chris Lattner
8a5b40154f Substantially revamp the local spiller, causing it to actually improve the
generated code over the simple spiller.  The new local spiller generates
substantially better code than the simple one in some cases, by reusing
values that are loaded out of stack slots and kept available in registers.

This primarily helps programs that are spilling a lot, and there is still
stuff that can be done to improve it.  This patch makes the local spiller
the default, as it's only a tiny bit slower than the simple spiller (it
increases the runtime of llc by < 1%).

Here are some numbers with speedups.

Program    #reuse  old(s)    new(s)  Speedup

Povray:     3452,  16.87 ->  15.93   (5.5%)
177.mesa:   2176,   2.77 ->   2.76   (0%)
179.art:      35,  28.43 ->  28.01   (1.5%)
183.equake:   55,  61.44 ->  61.41   (0%)
188.ammp:    869, 174    -> 149      (15%)

164.gzip:     43,  40.73 ->  40.71   (0%)
175.vpr:     351,  18.54 ->  17.34   (6.5%)
176.gcc:    2471,   5.01 ->   4.92   (1.8%)
181.mcf:      42,  79.30 ->  75.20   (5.2%)
186.crafty:  484,  29.73 ->  30.04   (-1%)
197.parser:  251,  10.47 ->  10.67   (-1%)
252.eon:    1501,   1.98 ->   1.75   (12%)
253.perlbmk: 1183,  14.83 ->  14.42   (2.8%)
254.gap:     825,   7.46 ->   7.29   (2.3%)
255.vortex:  285,  10.51 ->  10.27   (2.3%)
256.bzip2:    63,  55.70 ->  55.20   (0.9%)
300.twolf:   830,  21.63 ->  22.00   (-1%)

PtrDist/ks    14,  32.75 -> 17.53    (46.5%)
Olden/tsp     46,   8.71 ->  8.24    (5.4%)
Free/distray  70,   1.09 ->  0.99    (9.2%)

llvm-svn: 16629
2004-10-01 19:04:51 +00:00
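A sketch of the availability map at the heart of the reuse idea, with
illustrative names rather than the actual spiller data structures: remember
which physical register currently holds each stack slot's value, so a second
reload from the same slot can reuse that register, and invalidate the entry
when the register is clobbered.

#include <map>

struct AvailableSpills {
  std::map<int, unsigned> slotToReg;   // stack slot -> physreg holding it
  std::map<unsigned, int> regToSlot;   // reverse map for invalidation

  void addAvailable(int slot, unsigned physReg) {
    slotToReg[slot] = physReg;
    regToSlot[physReg] = slot;
  }

  // Returns the register holding this slot's value, or 0 if none.
  unsigned getReuseReg(int slot) const {
    std::map<int, unsigned>::const_iterator it = slotToReg.find(slot);
    return it == slotToReg.end() ? 0 : it->second;
  }

  // Any definition of physReg makes its cached slot value stale.
  void clobberPhysReg(unsigned physReg) {
    std::map<unsigned, int>::iterator it = regToSlot.find(physReg);
    if (it != regToSlot.end()) {
      slotToReg.erase(it->second);
      regToSlot.erase(it);
    }
  }
};
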
Chris Lattner
2ddd74fa8b Pretty print a bit nicer :)
llvm-svn: 16628
2004-10-01 19:01:39 +00:00
Alkis Evlogimenos
8e4ed30dcb Document this class a bit :-)
llvm-svn: 16626
2004-10-01 00:35:07 +00:00
Chris Lattner
db2a0987cc Use more efficient map operations. Fix a bug that would affect hypothetical
targets that supported multiple memory operands.

llvm-svn: 16614
2004-09-30 16:35:08 +00:00
Chris Lattner
8d8b8b05bd There is no need to call MachineInstr::print directly, just send the MI& to an ostream.
llvm-svn: 16613
2004-09-30 16:10:45 +00:00
Chris Lattner
bb425800f5 * Wrap some comments to 80 cols
* Add const_iterator stuff
* Add a print method, which means that I can now call dump() from the
  debugger.

llvm-svn: 16612
2004-09-30 15:59:17 +00:00
Chris Lattner
168a4380d9 Simplify the logic in the simple spiller and capitalize some variables
llvm-svn: 16609
2004-09-30 02:59:33 +00:00
Chris Lattner
1f61bfb971 Switch from defaulting to the 'local' spiller to the 'simple' spiller. The
two spillers produce perfectly identical code (at least on povray and eon),
but the simple spiller is substantially faster than the local spiller. Once
the local spiller is improved, we can switch back.

Switching cuts 5.2% off of the llc time for povray (about 1.3s).

llvm-svn: 16608
2004-09-30 02:40:06 +00:00
Chris Lattner
d716b5739c Don't use a densemap for keeping track of which vregs are already loaded; just
use a simple vector.  This speeds up -spiller=simple from taking 22s to taking
.1s on povray (debug build).  This change does not modify the generated code.

llvm-svn: 16607
2004-09-30 02:33:48 +00:00
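A sketch of the data-structure swap, under the assumption that virtual
register numbers are dense small integers (names are illustrative): a plain
vector indexed by vreg is much cheaper than a map when all we record is
"already loaded or not".

#include <vector>

struct LoadedVRegs {
  std::vector<bool> loaded;   // indexed by virtual register number

  void grow(unsigned numVRegs) { loaded.resize(numVRegs, false); }
  bool isLoaded(unsigned vreg) const { return loaded[vreg]; }
  void markLoaded(unsigned vreg) { loaded[vreg] = true; }
};
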
Chris Lattner
d27ff8035e Use longer and more explicit names for instance vars (particularly important
data structures).  Fix the print method to send to the right ostream, not
always cerr.  Delete typedefs that are only used once.

llvm-svn: 16606
2004-09-30 02:15:18 +00:00
Chris Lattner
962d398430 Free the VirtRegMap at the end of MachineFunction processing instead of at
the beginning of processing the next one.

llvm-svn: 16605
2004-09-30 02:02:33 +00:00
Chris Lattner
418207045d Reindent code, improve comments, move huge nested methods out of classes,
prune #includes, add print/dump methods, etc.  No functionality changes.

llvm-svn: 16604
2004-09-30 01:54:45 +00:00
Alkis Evlogimenos
4f5920aaef Add includes and use std:: for standard library calls to make code
compile on Windows. This patch was contributed by Paolo Invernizzi.

llvm-svn: 16539
2004-09-28 14:42:44 +00:00
Alkis Evlogimenos
95cc7f115a Fix includes. Patch contributed by Paolo Invernizzi!
llvm-svn: 16533
2004-09-28 02:38:58 +00:00
Alkis Evlogimenos
4117e2a7a9 Grow the map on entry so that we don't crash if joinIntervals never
runs (if coalescing is disabled, for example).

llvm-svn: 16259
2004-09-09 19:24:38 +00:00
Alkis Evlogimenos
a5edc58139 Use a DenseMap for mapping reg->reg. This improves the LiveInterval
analysis running time from 2.7869secs to 2.5226secs on 176.gcc.

llvm-svn: 16244
2004-09-08 03:01:50 +00:00
Alkis Evlogimenos
992cf4f638 Indent to 2 spaces and clean up excess whitespace.
llvm-svn: 16188
2004-09-05 18:41:35 +00:00
Alkis Evlogimenos
d1388a3eae Indent to 2 spaces.
llvm-svn: 16187
2004-09-05 18:39:20 +00:00
Misha Brukman
73837314b6 Order #includes alphabetically, local .h files first.
llvm-svn: 16153
2004-09-03 18:25:53 +00:00
Alkis Evlogimenos
0c50e0f211 Fixes to make LLVM compile with vc7.1.
Patch contributed by Paolo Invernizzi!

llvm-svn: 16152
2004-09-03 18:19:51 +00:00
Alkis Evlogimenos
2676000067 Change the way we choose a free register: instead of picking the first
free allocatable register, we prefer a free one with the most uses
of inactive intervals.

llvm-svn: 16148
2004-09-02 21:24:33 +00:00
Alkis Evlogimenos
999a10318f Change the way we choose a free register: instead of picking the first
free allocatable register, we prefer a free one with the most uses
of inactive intervals. This causes fewer spills and performs a bit
better compared to gcc:

Program                 | GCC/LLC (Before)| GCC/LLC (After)
164.gzip/164.gzip       | 0.59            | 0.60
175.vpr/175.vpr         | 0.57            | 0.58
176.gcc/176.gcc         | 0.59            | 0.61
181.mcf/181.mcf         | 0.94            | 0.95
186.crafty/186.crafty   | 0.62            | 0.62
197.parser/197.parser   | 0.89            | 0.88
252.eon/252.eon         | 0.61            | 0.66
253.perlbmk/253.perlbmk | 0.79            | 0.84
254.gap/254.gap         | 0.81            | 0.81
255.vortex/255.vortex   | 0.92            | 0.93
256.bzip2/256.bzip2     | 0.69            | 0.69
300.twolf/300.twolf     | 0.91            | 0.90

llvm-svn: 16147
2004-09-02 21:23:32 +00:00
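One plausible reading of the heuristic, sketched with illustrative types:
among the free allocatable registers, prefer the one used by the most
inactive intervals instead of simply the first free one.

#include <map>
#include <vector>

unsigned chooseFreeReg(const std::vector<unsigned> &freeRegs,
                       const std::map<unsigned, unsigned> &inactiveUseCounts) {
  unsigned best = 0, bestCount = 0;
  bool haveBest = false;
  for (unsigned reg : freeRegs) {
    std::map<unsigned, unsigned>::const_iterator it =
        inactiveUseCounts.find(reg);
    unsigned count = it == inactiveUseCounts.end() ? 0 : it->second;
    if (!haveBest || count > bestCount) {
      best = reg;
      bestCount = count;
      haveBest = true;
    }
  }
  return best;   // 0 only if freeRegs was empty
}
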
Alkis Evlogimenos
7a287da6cd We don't need to sort the added vector as unhandled intervals are
stored in a binary heap.

llvm-svn: 16143
2004-09-02 18:00:38 +00:00
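A sketch of why no explicit sort is needed, using std::priority_queue as a
stand-in for the heap of unhandled intervals (IntervalRef is an illustrative
type): the heap restores start-point order on each push and pop.

#include <queue>
#include <vector>

struct IntervalRef {
  unsigned start;
  unsigned id;
};

struct StartsLater {
  bool operator()(const IntervalRef &a, const IntervalRef &b) const {
    return a.start > b.start;   // min-heap on start point
  }
};

typedef std::priority_queue<IntervalRef, std::vector<IntervalRef>, StartsLater>
    UnhandledQueue;

void addIntervals(UnhandledQueue &unhandled,
                  const std::vector<IntervalRef> &added) {
  for (const IntervalRef &iv : added)
    unhandled.push(iv);   // no std::sort of 'added' needed beforehand
}
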
Reid Spencer
c4abcbefb1 Changes For Bug 352
Move include/Config and include/Support into include/llvm/Config,
include/llvm/ADT and include/llvm/Support. From here on out, all LLVM
public header files must be under include/llvm/.

llvm-svn: 16137
2004-09-01 22:55:40 +00:00
Alkis Evlogimenos
e7e63c8da4 Be a bit more efficient when processing the active and inactive
lists. Instead of scanning the vector backwards, scan it forward and
swap each element we want to erase. Then at the end erase all removed
intervals at once. This doesn't save much: 0.08s out of 4s when
compiling 176.gcc.

llvm-svn: 16136
2004-09-01 22:52:29 +00:00
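A sketch of the erase strategy described above; std::remove_if expresses the
same single-pass idea of shuffling doomed elements to the tail and erasing
them in one call.

#include <algorithm>
#include <vector>

template <typename T, typename Pred>
void eraseAll(std::vector<T> &v, Pred shouldErase) {
  // One forward pass keeps survivors, then a single erase drops the rest.
  v.erase(std::remove_if(v.begin(), v.end(), shouldErase), v.end());
}
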
Alkis Evlogimenos
ad6f0f57fa Give a better assertion if we see a use before a def.
llvm-svn: 16135
2004-09-01 22:34:52 +00:00
Alkis Evlogimenos
0513a4d8e1 Minor code clarity changes.
llvm-svn: 16123
2004-08-31 17:39:15 +00:00
Nate Begeman
efbdaad4b7 Put this change back in after testing from Reid proved its innocence. getSpillSize now returns the value in bits
llvm-svn: 16102
2004-08-29 22:00:24 +00:00
Alkis Evlogimenos
4f0c0a1adc Remove dead code.
llvm-svn: 16077
2004-08-28 22:43:31 +00:00
Alkis Evlogimenos
8ae92113d6 Now that LiveIntervals::addIntervalsForSpills is fixed, do not require
LiveVariables.

llvm-svn: 16076
2004-08-27 19:00:29 +00:00