//===- README.txt - Notes for improving PowerPC-specific code gen ---------===//

TODO:
* gpr0 allocation
* implement do-loop -> bdnz transform
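
For the do-loop -> bdnz item, the target is a loop whose trip count is known
at entry: the count can be moved into the CTR register once (mtctr), and bdnz
then decrements CTR and branches in a single instruction.  A minimal C sketch
of the kind of loop this would catch (a hypothetical example):

void zero16(int *p) {
  int i;
  /* trip count (16) is known at entry: candidate for mtctr/bdnz */
  for (i = 0; i != 16; ++i)
    p[i] = 0;
}
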
===-------------------------------------------------------------------------===

Use the stfiwx instruction for:

void foo(float a, int *b) { *b = a; }

(fctiwz produces the converted integer in an FPR; stfiwx can store that word
to *b directly, rather than bouncing it through the stack and a GPR.)

===-------------------------------------------------------------------------===

Support 'update' load/store instructions. These are cracked on the G5, but are
still a codesize win.
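
A sketch of the pattern these would catch: pre-increment addressing like the
following maps onto the update forms (e.g. lwzu, which performs the load and
writes the updated address back to the base register in one instruction).
This is a hypothetical example:

int sum(int *p, int n) {
  int s = 0;
  while (n-- > 0)
    s += *++p;          /* the pointer bump + load can be a single lwzu */
  return s;
}
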
===-------------------------------------------------------------------------===

Should hint to the branch select pass that it doesn't need to print the second
unconditional branch, so we don't end up with things like:

        b .LBBl42__2E_expand_function_8_674     ; loopentry.24
        b .LBBl42__2E_expand_function_8_42      ; NewDefault
        b .LBBl42__2E_expand_function_8_42      ; NewDefault

This occurs in SPASS.

===-------------------------------------------------------------------------===

Teach the .td file to pattern match PPC::BR_COND to appropriate bc variant, so
we don't have to always run the branch selector for small functions.

===-------------------------------------------------------------------------===

#define ARRAY_LENGTH 16

union bitfield {
  struct {
#ifndef __ppc__
    unsigned int field0 : 6;
    unsigned int field1 : 6;
    unsigned int field2 : 6;
    unsigned int field3 : 6;
    unsigned int field4 : 3;
    unsigned int field5 : 4;
    unsigned int field6 : 1;
#else
    unsigned int field6 : 1;
    unsigned int field5 : 4;
    unsigned int field4 : 3;
    unsigned int field3 : 6;
    unsigned int field2 : 6;
    unsigned int field1 : 6;
    unsigned int field0 : 6;
#endif
  } bitfields, bits;
  unsigned int u32All;
  signed int i32All;
  float f32All;
};

typedef struct program_t {
  union bitfield array[ARRAY_LENGTH];
  int size;
  int loaded;
} program;

void AdjustBitfields(program* prog, unsigned int fmt1)
{
  prog->array[0].bitfields.field0 = fmt1;
  prog->array[0].bitfields.field1 = fmt1 + 1;
}

We currently generate:

_AdjustBitfields:
        lwz r2, 0(r3)
        addi r5, r4, 1
        rlwinm r2, r2, 0, 0, 19
        rlwinm r5, r5, 6, 20, 25
        rlwimi r2, r4, 0, 26, 31
        or r2, r2, r5
        stw r2, 0(r3)
        blr

We should teach someone that or (rlwimi, rlwinm) with disjoint masks can be
turned into rlwimi (rlwimi).

The better codegen would be:

_AdjustBitfields:
        lwz r0,0(r3)
        rlwinm r4,r4,0,0xff
        rlwimi r0,r4,0,26,31
        addi r4,r4,1
        rlwimi r0,r4,6,20,25
        stw r0,0(r3)
        blr

===-------------------------------------------------------------------------===

Compile this:

int %f1(int %a, int %b) {

===-------------------------------------------------------------------------===

Implement TargetConstantVec, and set up PPC to custom lower ConstantVec into
TargetConstantVec's if it's one of the many forms that are algorithmically
computable using the spiffy altivec instructions.

===-------------------------------------------------------------------------===

Compile this:

void foo(int *a, int c, int t) {
  if (c)
    *a = t;
}

===-------------------------------------------------------------------------===

This:

int test(unsigned *P) { return *P >> 24; }

Should compile to:

_test:
        lbz r3,0(r3)
        blr

not:

_test:
        lwz r2, 0(r3)
        srwi r3, r2, 24
        blr

===-------------------------------------------------------------------------===

On the G5, logical CR operations are more expensive in their three-address
form: ops that read/write the same register are half as expensive as those
that read from two registers that are different from their destination.

We should model this with two separate instructions.  The isel should generate
the "two address" form of the instructions.  When the register allocator
detects that it needs to insert a copy due to the two-addressness of the CR
logical op, it will invoke PPCInstrInfo::convertToThreeAddress.  At this point
we can convert to the "three address" instruction, to save code space.

This only matters when we start generating cr logical ops.
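
For reference, source like the following is where cr logicals would arise once
we emit them (a hypothetical example): the two compares set separate CR
fields, and the && can become a crand instead of a branch.

int in_range(int a, int lo, int hi) {
  /* both compares are side-effect free, so && can be evaluated branchlessly */
  return a >= lo && a <= hi;
}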

===-------------------------------------------------------------------------===

We should compile these two functions to the same thing:

#include <stdlib.h>
void f(int a, int b, int *P) {
  *P = (a-b)>=0?(a-b):(b-a);
}
void g(int a, int b, int *P) {
  *P = abs(a-b);
}

Further, they should compile to something better than:

_g:
        subf r2, r4, r3
        subfic r3, r2, 0
        cmpwi cr0, r2, -1
        bgt cr0, LBB2_2 ; entry
LBB2_1: ; entry
        mr r2, r3
LBB2_2: ; entry
        stw r2, 0(r5)
        blr

GCC produces:

_g:
        subf r4,r4,r3
        srawi r2,r4,31
        xor r0,r2,r4
        subf r0,r2,r0
        stw r0,0(r5)
        blr

... which is much nicer.
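
The gcc sequence is the classic branchless abs: the sign mask m = d >> 31 is 0
when d is non-negative and -1 when d is negative, and (d ^ m) - m negates d
exactly in the negative case.  A minimal C sketch, assuming 32-bit int and an
arithmetic right shift of negative values:

int absdiff(int a, int b) {
  int d = a - b;
  int m = d >> 31;      /* 0 if d >= 0, -1 if d < 0 */
  return (d ^ m) - m;   /* d when m == 0; ~d + 1 == -d when m == -1 */
}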

This theoretically may help improve twolf slightly (used in dimbox.c:142?).

===-------------------------------------------------------------------------===

int foo(int N, int ***W, int **TK, int X) {
  int t, i;

  for (t = 0; t < N; ++t)
    for (i = 0; i < 4; ++i)
      W[t / X][i][t % X] = TK[i][t];

  return 5;
}

We generate relatively atrocious code for this loop compared to gcc.

We could also strength reduce the rem and the div:
http://www.lcs.mit.edu/pubs/pdf/MIT-LCS-TM-600.pdf
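
A sketch of the strength reduction, assuming X > 0: carry the quotient and
remainder of t/X across iterations instead of recomputing the div and rem on
every trip.

int foo(int N, int ***W, int **TK, int X) {
  int t, i, q = 0, r = 0;   /* invariant: q == t / X and r == t % X */

  for (t = 0; t < N; ++t) {
    for (i = 0; i < 4; ++i)
      W[q][i][r] = TK[i][t];
    if (++r == X) {         /* step the div/rem pair incrementally */
      r = 0;
      ++q;
    }
  }
  return 5;
}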

===-------------------------------------------------------------------------===

float foo(float X) { return (int)(X); }

Currently produces:

_foo:
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        extsw r2, r2
        std r2, -16(r1)
        lfd f0, -16(r1)
        fcfid f0, f0
        frsp f1, f0
        blr

We could use a target dag combine to turn the lwz/extsw into an lwa when the
lwz has a single use (the lwz/extsw pair above would then become a single
"lwa r2, -4(r1)").  Since LWA is cracked anyway, this would be a codesize win
only.

===-------------------------------------------------------------------------===

We generate ugly code for this:

void func(unsigned int *ret, float dx, float dy, float dz, float dw) {
  unsigned code = 0;
  if (dx < -dw) code |= 1;
  if (dx >  dw) code |= 2;
  if (dy < -dw) code |= 4;
  if (dy >  dw) code |= 8;
  if (dz < -dw) code |= 16;
  if (dz >  dw) code |= 32;
  *ret = code;
}
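
A branchless formulation the compiler could be taught to reach (a sketch only;
semantically identical to the above, with each compare materialized as 0/1 and
shifted into place):

void func(unsigned int *ret, float dx, float dy, float dz, float dw) {
  unsigned code = 0;
  /* each relational yields 0 or 1; OR it into the right bit of the mask */
  code |= (unsigned)(dx < -dw) << 0;
  code |= (unsigned)(dx >  dw) << 1;
  code |= (unsigned)(dy < -dw) << 2;
  code |= (unsigned)(dy >  dw) << 3;
  code |= (unsigned)(dz < -dw) << 4;
  code |= (unsigned)(dz >  dw) << 5;
  *ret = code;
}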

===-------------------------------------------------------------------------===

Complete the "signed i32 to FP conversion using 64-bit registers"
transformation; it is good for PI.  See this comment in PPCISelLowering.cpp:

  // FIXME: disable this lowered code. This generates 64-bit register values,
  // and we don't model the fact that the top part is clobbered by calls. We
  // need to flag these together so that the value isn't live across a call.
  //setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);

===-------------------------------------------------------------------------===

void %foo(uint *%tmp) {
        %tmp = load uint* %tmp                  ; <uint> [#uses=3]
        %tmp1 = shr uint %tmp, ubyte 31         ; <uint> [#uses=1]
        %tmp1 = cast uint %tmp1 to ubyte        ; <ubyte> [#uses=1]
        %tmp4.mask = shr uint %tmp, ubyte 30    ; <uint> [#uses=1]
        %tmp4.mask = cast uint %tmp4.mask to ubyte      ; <ubyte> [#uses=1]
        %tmp = or ubyte %tmp4.mask, %tmp1       ; <ubyte> [#uses=1]
        %tmp10 = cast ubyte %tmp to uint        ; <uint> [#uses=1]
        %tmp11 = shl uint %tmp10, ubyte 31      ; <uint> [#uses=1]
        %tmp12 = and uint %tmp, 2147483647      ; <uint> [#uses=1]
        %tmp13 = or uint %tmp11, %tmp12         ; <uint> [#uses=1]
        store uint %tmp13, uint* %tmp
        ret void
}

We emit:

_foo:
        lwz r2, 0(r3)
        srwi r4, r2, 30
        srwi r5, r2, 31
        or r4, r4, r5
        rlwimi r2, r4, 31, 0, 0
        stw r2, 0(r3)
        blr

What this code is really doing is ORing bit 0 with bit 1.  We could codegen
this as:

_foo:
        lwz r2, 0(r3)
        rlwinm r4, r2, 1, 0, 0
        or r2, r2, r4
        stw r2, 0(r3)
        blr

===-------------------------------------------------------------------------===

Distilled from the code above, something wacky is going on in the optimizers
before code generation time...

unsigned foo(unsigned x) {
  return (unsigned)((unsigned char)(x >> 30) | (unsigned char)(x >> 31)) << 31;
}

unsigned bar(unsigned x) {
  return ((x >> 30) | (x >> 31)) << 31;
}

These generate different code when -O is passed to llvm-gcc.  However, when no
optimization is specified and the output is passed into opt with just -mem2reg
and -instcombine, the good code comes out of both.  Something before
instcombine is confusing it, so that it fails to delete the no-op casts.
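
A sketch of the reproduction (assuming llvm-gcc accepts -emit-llvm and the
tools can be piped this way; only -mem2reg/-instcombine come from the note
itself, the rest of the command line is a guess):

  llvm-gcc -O0 -emit-llvm -c test.c -o - | opt -mem2reg -instcombine | llvm-dis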