X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FPowerPC%2FREADME.txt;h=14a0bc124ed7f42bfa1374f323615426675535ef;hb=ef698ca30d96baf4ec843740396187b722a63e69;hp=58805562a87e94776eb3802de9c4953d8cbd094c;hpb=39706e68a5a645d85d0f24c2e43d67cb6024829d;p=oota-llvm.git

diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt
index 58805562a87..14a0bc124ed 100644
--- a/lib/Target/PowerPC/README.txt
+++ b/lib/Target/PowerPC/README.txt
@@ -1,41 +1,19 @@
+//===- README.txt - Notes for improving PowerPC-specific code gen ---------===//
+
 TODO:
 * gpr0 allocation
 * implement do-loop -> bdnz transform
 * implement powerpc-64 for darwin
-* use stfiwx in float->int
-* be able to combine sequences like the following into 2 instructions:
- lis r2, ha16(l2__ZTV4Cell)
- la r2, lo16(l2__ZTV4Cell)(r2)
- addi r2, r2, 8
-
-* Teach LLVM how to codegen this:
-unsigned short foo(float a) { return a; }
-as:
-_foo:
- fctiwz f0,f1
- stfd f0,-8(r1)
- lhz r3,-2(r1)
- blr
-not:
-_foo:
- fctiwz f0, f1
- stfd f0, -8(r1)
- lwz r2, -4(r1)
- rlwinm r3, r2, 0, 16, 31
- blr
-and:
- extern int X, Y; int* test(int C) { return C? &X : &Y; }
-as one load when using --enable-pic.
+===-------------------------------------------------------------------------===
+
+Support 'update' load/store instructions. These are cracked on the G5, but are
+still a codesize win.
 
-* Support 'update' load/store instructions. These are cracked on the G5, but
- are still a codesize win.
+===-------------------------------------------------------------------------===
 
-* should hint to the branch select pass that it doesn't need to print the
- second unconditional branch, so we don't end up with things like:
- b .LBBl42__2E_expand_function_8_674 ; loopentry.24
- b .LBBl42__2E_expand_function_8_42 ; NewDefault
- b .LBBl42__2E_expand_function_8_42 ; NewDefault
+Teach the .td file to pattern match PPC::BR_COND to appropriate bc variant, so
+we don't have to always run the branch selector for small functions.
 
 ===-------------------------------------------------------------------------===
 
@@ -48,7 +26,7 @@ as one load when using --enable-pic.
 as:
 
 xoris r0,r3,0x1234
- cmpwi cr0,r0,0x5678
+ cmplwi cr0,r0,0x5678
 beq cr0,L6
 
 not:
@@ -85,24 +63,25 @@ It would be better to materialize .CPI_X into a register, then use immediates
 off of the register to avoid the lis's. This is even more important in PIC
 mode.
 
+Note that this (and the static variable version) is discussed here for GCC:
+http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html
+
 ===-------------------------------------------------------------------------===
 
-Implement Newton-Rhapson method for improving estimate instructions to the
-correct accuracy, and implementing divide as multiply by reciprocal when it has
-more than one use. Itanium will want this too.
+PIC Code Gen IPO optimization:
 
-===-------------------------------------------------------------------------===
+Squish small scalar globals together into a single global struct, allowing the
+address of the struct to be CSE'd, avoiding PIC accesses (also reduces the size
+of the GOT on targets with one).
 
-int foo(int a, int b) { return a == b ? 16 : 0; }
-_foo:
- cmpw cr7, r3, r4
- mfcr r2
- rlwinm r2, r2, 31, 31, 31
- slwi r3, r2, 4
- blr
+Note that this is discussed here for GCC:
+http://gcc.gnu.org/ml/gcc-patches/2006-02/msg00133.html
 
-If we exposed the srl & mask ops after the MFCR that we are doing to select
-the correct CR bit, then we could fold the slwi into the rlwinm before it.
+===-------------------------------------------------------------------------===
+
+Implement Newton-Raphson method for improving estimate instructions to the
+correct accuracy, and implementing divide as multiply by reciprocal when it has
+more than one use. Itanium will want this too.
 
 ===-------------------------------------------------------------------------===
 
@@ -143,62 +122,36 @@ typedef struct program_t {
 
 void AdjustBitfields(program* prog, unsigned int fmt1)
 {
-  unsigned int shift = 0;
-  unsigned int texCount = 0;
-  unsigned int i;
-
-  for (i = 0; i < 8; i++)
-  {
-    prog->array[i].bitfields.field0 = texCount;
-    prog->array[i].bitfields.field1 = texCount + 1;
-    prog->array[i].bitfields.field2 = texCount + 2;
-    prog->array[i].bitfields.field3 = texCount + 3;
-
-    texCount += (fmt1 >> shift) & 0x7;
-    shift += 3;
-  }
+  prog->array[0].bitfields.field0 = fmt1;
+  prog->array[0].bitfields.field1 = fmt1 + 1;
 }
 
-In the loop above, the bitfield adds get generated as
-(add (shl bitfield, C1), (shl C2, C1)) where C2 is 1, 2 or 3.
-
-Since the input to the (or and, and) is an (add) rather than a (shl), the shift
-doesn't get folded into the rlwimi instruction. We should ideally see through
-things like this, rather than forcing llvm to generate the equivalent
-
-(shl (add bitfield, C2), C1) with some kind of mask.
+We currently generate:
 
-===-------------------------------------------------------------------------===
+_AdjustBitfields:
+ lwz r2, 0(r3)
+ addi r5, r4, 1
+ rlwinm r2, r2, 0, 0, 19
+ rlwinm r5, r5, 6, 20, 25
+ rlwimi r2, r4, 0, 26, 31
+ or r2, r2, r5
+ stw r2, 0(r3)
+ blr
 
-Compile this (standard bitfield insert of a constant):
-void %test(uint* %tmp1) {
- %tmp2 = load uint* %tmp1 ; <uint> [#uses=1]
- %tmp5 = or uint %tmp2, 257949696 ; <uint> [#uses=1]
- %tmp6 = and uint %tmp5, 4018143231 ; <uint> [#uses=1]
- store uint %tmp6, uint* %tmp1
- ret void
-}
+We should teach someone that or (rlwimi, rlwinm) with disjoint masks can be
+turned into rlwimi (rlwimi)
 
-to:
+The better codegen would be:
 
-_test:
+_AdjustBitfields:
 lwz r0,0(r3)
- li r2,123
- rlwimi r0,r2,21,3,10
+ rlwinm r4,r4,0,0xff
+ rlwimi r0,r4,0,26,31
+ addi r4,r4,1
+ rlwimi r0,r4,6,20,25
 stw r0,0(r3)
 blr
 
-instead of:
-
-_test:
- lis r2, -4225
- lwz r4, 0(r3)
- ori r2, r2, 65535
- oris r4, r4, 3936
- and r2, r4, r2
- stw r2, 0(r3)
- blr
-
 ===-------------------------------------------------------------------------===
 
 Compile this:
 
@@ -249,27 +202,352 @@ should be branch free code. LLVM is turning it into < 1 because of the RHS.
 
 ===-------------------------------------------------------------------------===
 
-For this testcase:
-int f1(int a, int b) { return (a&0xF)|(b&0xF0); }
+No loads or stores of the constants should be needed:
 
-We currently emit:
-_f1:
- rlwinm r2, r4, 0, 24, 27
- rlwimi r2, r3, 0, 28, 31
- or r3, r2, r2
+struct foo { double X, Y; };
+void xxx(struct foo F);
+void bar() { struct foo R = { 1.0, 2.0 }; xxx(R); }
+
+===-------------------------------------------------------------------------===
+
+Darwin Stub LICM optimization:
+
+Loops like this:
+
+  for (...) bar();
+
+Have to go through an indirect stub if bar is external or linkonce. It would
+be better to compile it as:
+
+  fp = &bar;
+  for (...) fp();
+
+which only computes the address of bar once (instead of each time through the
+stub). This is Darwin specific and would have to be done in the code generator.
+Probably not a win on x86.
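
A rough source-level sketch of the rewrite described above (purely
illustrative; the name bar and the driver loops are made up for the example,
and the real transformation would be done by the code generator rather than in
source):

extern void bar(void);        /* external: calls must go through a Darwin stub */

void before(int n) {
  int i;
  for (i = 0; i < n; i++)
    bar();                    /* every iteration branches through the stub */
}

void after(int n) {
  void (*fp)(void) = &bar;    /* address of bar materialized once, outside the loop */
  int i;
  for (i = 0; i < n; i++)
    fp();                     /* indirect call through the cached pointer */
}

The point is just that the address computation is loop-invariant, so codegen
LICM could hoist it and pay for the stub's address materialization once per
loop instead of once per call.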
+
+===-------------------------------------------------------------------------===
+
+PowerPC i1/setcc stuff (depends on subreg stuff):
+
+Check out the PPC code we get for 'compare' in this testcase:
+http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19672
+
+oof. on top of not doing the logical crnand instead of (mfcr, mfcr,
+invert, invert, or), we then have to compare it against zero instead of
+using the value already in a CR!
+
+that should be something like
+ cmpw cr7, r8, r5
+ cmpw cr0, r7, r3
+ crnand cr0, cr0, cr7
+ bne cr0, LBB_compare_4
+
+instead of
+ cmpw cr7, r8, r5
+ cmpw cr0, r7, r3
+ mfcr r7, 1
+ mcrf cr7, cr0
+ mfcr r8, 1
+ rlwinm r7, r7, 30, 31, 31
+ rlwinm r8, r8, 30, 31, 31
+ xori r7, r7, 1
+ xori r8, r8, 1
+ addi r2, r2, 1
+ or r7, r8, r7
+ cmpwi cr0, r7, 0
+ bne cr0, LBB_compare_4 ; loopexit
+
+FreeBench/mason has a basic block that looks like this:
+
+ %tmp.130 = seteq int %p.0__, 5 ; <bool> [#uses=1]
+ %tmp.134 = seteq int %p.1__, 6 ; <bool> [#uses=1]
+ %tmp.139 = seteq int %p.2__, 12 ; <bool> [#uses=1]
+ %tmp.144 = seteq int %p.3__, 13 ; <bool> [#uses=1]
+ %tmp.149 = seteq int %p.4__, 14 ; <bool> [#uses=1]
+ %tmp.154 = seteq int %p.5__, 15 ; <bool> [#uses=1]
+ %bothcond = and bool %tmp.134, %tmp.130 ; <bool> [#uses=1]
+ %bothcond123 = and bool %bothcond, %tmp.139 ; <bool>
+ %bothcond124 = and bool %bothcond123, %tmp.144 ; <bool>
+ %bothcond125 = and bool %bothcond124, %tmp.149 ; <bool>
+ %bothcond126 = and bool %bothcond125, %tmp.154 ; <bool>
+ br bool %bothcond126, label %shortcirc_next.5, label %else.0
+
+This is a particularly important case where handling CRs better will help.
+
+===-------------------------------------------------------------------------===
+
+Simple IPO for argument passing, change:
+  void foo(int X, double Y, int Z) -> void foo(int X, int Z, double Y)
+
+the Darwin ABI specifies that any integer arguments in the first 32 bytes worth
+of arguments get assigned to r3 through r10. That is, if you have a function
+foo(int, double, int) you get r3, f1, r6, since the 64 bit double ate up the
+argument bytes for r4 and r5. The trick then would be to shuffle the argument
+order for functions we can internalize so that the maximum number of
+integers/pointers get passed in regs before you see any of the fp arguments.
+
+Instead of implementing this, it would actually probably be easier to just
+implement a PPC fastcc, where we could do whatever we wanted to the CC,
+including having this work sanely.
+
+===-------------------------------------------------------------------------===
+
+Fix Darwin FP-In-Integer Registers ABI
+
+Darwin passes doubles in structures in integer registers, which is very very
+bad. Add something like a BIT_CONVERT to LLVM, then do an i-p transformation
+that percolates these things out of functions.
+
+Check out how horrible this is:
+http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html
+
+This is an extension of "interprocedural CC unmunging" that can't be done with
+just fastcc.
+
+===-------------------------------------------------------------------------===
+
+Generate lwbrx and other byteswapping load/store instructions when reasonable.
+
+===-------------------------------------------------------------------------===
+
+Compile this:
+
+int foo(int a) {
+  int b = (a < 8);
+  if (b) {
+    return b * 3;     // ignore the fact that this is always 3.
+  } else {
+    return 2;
+  }
+}
+
+into something not this:
+
+_foo:
+1) cmpwi cr7, r3, 8
+ mfcr r2, 1
+ rlwinm r2, r2, 29, 31, 31
+1) cmpwi cr0, r3, 7
+ bgt cr0, LBB1_2 ; UnifiedReturnBlock
+LBB1_1: ; then
+ rlwinm r2, r2, 0, 31, 31
+ mulli r3, r2, 3
+ blr
+LBB1_2: ; UnifiedReturnBlock
+ li r3, 2
 blr
 
-We could emit:
-_f1:
- rlwinm r4, r4, 0, 24, 27
- rlwimi r3, r4, 0, 0, 27
+In particular, the two compares (marked 1) could be shared by reversing one.
+This could be done in the dag combiner, by swapping a BR_CC when a SETCC of the
+same operands (but backwards) exists. In this case, this wouldn't save us
+anything though, because the compares still wouldn't be shared.
+
+===-------------------------------------------------------------------------===
+
+The legalizer should lower this:
+
+bool %test(ulong %x) {
+  %tmp = setlt ulong %x, 4294967296
+  ret bool %tmp
+}
+
+into "if x.high == 0", not:
+
+_test:
+ addi r2, r3, -1
+ cntlzw r2, r2
+ cntlzw r3, r3
+ srwi r2, r2, 5
+ srwi r4, r3, 5
+ li r3, 0
+ cmpwi cr0, r2, 0
+ bne cr0, LBB1_2 ;
+LBB1_1:
+ or r3, r4, r4
+LBB1_2:
 blr
 
+noticed in 2005-05-11-Popcount-ffs-fls.c.
+
+
 ===-------------------------------------------------------------------------===
 
-No loads or stores of the constants should be needed:
+We should custom expand setcc instead of pretending that we have it. That
+would allow us to expose the access of the crbit after the mfcr, allowing
+that access to be trivially folded into other ops. A simple example:
 
-struct foo { double X, Y; };
-void xxx(struct foo F);
-void bar() { struct foo R = { 1.0, 2.0 }; xxx(R); }
+int foo(int a, int b) { return (a < b) << 4; }
+
+compiles into:
+
+_foo:
+ cmpw cr7, r3, r4
+ mfcr r2, 1
+ rlwinm r2, r2, 29, 31, 31
+ slwi r3, r2, 4
+ blr
+
+===-------------------------------------------------------------------------===
+
+Fold add and sub with constant into non-extern, non-weak addresses so this:
+
+static int a;
+void bar(int b) { a = b; }
+void foo(unsigned char *c) {
+  *c = a;
+}
+
+So that
+
+_foo:
+ lis r2, ha16(_a)
+ la r2, lo16(_a)(r2)
+ lbz r2, 3(r2)
+ stb r2, 0(r3)
+ blr
+
+Becomes
+
+_foo:
+ lis r2, ha16(_a+3)
+ lbz r2, lo16(_a+3)(r2)
+ stb r2, 0(r3)
+ blr
+
+===-------------------------------------------------------------------------===
+
+We generate really bad code for this:
+
+int f(signed char *a, _Bool b, _Bool c) {
+  signed char t = 0;
+  if (b) t = *a;
+  if (c) *a = t;
+}
+
+===-------------------------------------------------------------------------===
+
+This:
+int test(unsigned *P) { return *P >> 24; }
+
+Should compile to:
+
+_test:
+ lbz r3,0(r3)
+ blr
+
+not:
+
+_test:
+ lwz r2, 0(r3)
+ srwi r3, r2, 24
+ blr
+
+===-------------------------------------------------------------------------===
+
+On the G5, logical CR operations are more expensive in their three
+address form: ops that read/write the same register are half as expensive as
+those that read from two registers that are different from their destination.
+
+We should model this with two separate instructions. The isel should generate
+the "two address" form of the instructions. When the register allocator
+detects that it needs to insert a copy due to the two-addressness of the CR
+logical op, it will invoke PPCInstrInfo::convertToThreeAddress. At this point
+we can convert to the "three address" instruction, to save code space.
+
+This only matters when we start generating cr logical ops.
+
+===-------------------------------------------------------------------------===
+
+We should compile these two functions to the same thing:
+
+#include <stdlib.h>
+void f(int a, int b, int *P) {
+  *P = (a-b)>=0?(a-b):(b-a);
+}
+void g(int a, int b, int *P) {
+  *P = abs(a-b);
+}
+
+Further, they should compile to something better than:
+
+_g:
+ subf r2, r4, r3
+ subfic r3, r2, 0
+ cmpwi cr0, r2, -1
+ bgt cr0, LBB2_2 ; entry
+LBB2_1: ; entry
+ mr r2, r3
+LBB2_2: ; entry
+ stw r2, 0(r5)
+ blr
+
+GCC produces:
+
+_g:
+ subf r4,r4,r3
+ srawi r2,r4,31
+ xor r0,r2,r4
+ subf r0,r2,r0
+ stw r0,0(r5)
+ blr
+
+... which is much nicer.
+
+This theoretically may help improve twolf slightly (used in dimbox.c:142?).
+
+===-------------------------------------------------------------------------===
+
+int foo(int N, int ***W, int **TK, int X) {
+  int t, i;
+
+  for (t = 0; t < N; ++t)
+    for (i = 0; i < 4; ++i)
+      W[t / X][i][t % X] = TK[i][t];
+
+  return 5;
+}
+
+We generate relatively atrocious code for this loop compared to gcc.
+
+We could also strength reduce the rem and the div:
+http://www.lcs.mit.edu/pubs/pdf/MIT-LCS-TM-600.pdf
+
+===-------------------------------------------------------------------------===
+
+float foo(float X) { return (int)(X); }
+
+Currently produces:
+
+_foo:
+ fctiwz f0, f1
+ stfd f0, -8(r1)
+ lwz r2, -4(r1)
+ extsw r2, r2
+ std r2, -16(r1)
+ lfd f0, -16(r1)
+ fcfid f0, f0
+ frsp f1, f0
+ blr
+
+We could use a target dag combine to turn the lwz/extsw into an lwa when the
+lwz has a single use. Since LWA is cracked anyway, this would be a codesize
+win only.
+
+===-------------------------------------------------------------------------===
+
+We generate ugly code for this:
+
+void func(unsigned int *ret, float dx, float dy, float dz, float dw) {
+  unsigned code = 0;
+  if(dx < -dw) code |= 1;
+  if(dx > dw) code |= 2;
+  if(dy < -dw) code |= 4;
+  if(dy > dw) code |= 8;
+  if(dz < -dw) code |= 16;
+  if(dz > dw) code |= 32;
+  *ret = code;
+}
+
+===-------------------------------------------------------------------------===
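
For comparison, here is one branch-free way to write the clipping function
above at the source level (purely illustrative; the name func2 is made up, and
this is only a sketch of the compare-and-set shape that good codegen for the
original would amount to):

void func2(unsigned int *ret, float dx, float dy, float dz, float dw) {
  unsigned code = 0;
  code |= (dx < -dw);        /* bit 0 */
  code |= (dx >  dw) << 1;   /* bit 1 */
  code |= (dy < -dw) << 2;   /* bit 2 */
  code |= (dy >  dw) << 3;   /* bit 3 */
  code |= (dz < -dw) << 4;   /* bit 4 */
  code |= (dz >  dw) << 5;   /* bit 5 */
  *ret = code;
}

Each comparison result is used directly as a 0/1 value, so every test is a
compare-and-set on its own bit rather than a separate branch.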