//===---------------------------------------------------------------------===//
With the recent changes to make the implicit def/use set explicit in
machineinstrs, we should change the target descriptions for 'call' instructions
so that the .td files don't list all the call-clobbered registers as implicit
defs.

//===---------------------------------------------------------------------===//

FreeBench/mason contains code like this:

typedef struct { int a; int b; int c; } p_type;
extern int m[];

p_type m0u(p_type *p) {
  int m[]={0, 8, 1, 2, 16, 5, 13, 7, 14, 9, 3, 4, 11, 12, 15, 10, 17, 6};
  p_type pu;
  pu.a = m[p->a];
  pu.b = m[p->b];
  pu.c = m[p->c];
  return pu;
}
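
If this compiles to a memcpy of the constant table onto the stack followed by
loads from the stack copy, it would be better to load straight from static
data.  A hedged sketch of the hand-transformed form (the names m0u_table and
m0u_opt are made up for illustration):

static const int m0u_table[18] = {0, 8, 1, 2, 16, 5, 13, 7, 14, 9,
                                  3, 4, 11, 12, 15, 10, 17, 6};

/* Same behavior as m0u above, but no stack copy of the table is made. */
p_type m0u_opt(p_type *p) {
  p_type pu;
  pu.a = m0u_table[p->a];
  pu.b = m0u_table[p->b];
  pu.c = m0u_table[p->c];
  return pu;
}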
//===---------------------------------------------------------------------===//
It would be nice to revert this patch:
http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20060213/031986.html
//===---------------------------------------------------------------------===//
For vector types, TargetData.cpp::getTypeInfo() returns alignment that is equal
to the type size.  This works, but it can be overly conservative, since the
alignment of a specific vector type is target dependent.  For example, a
64-byte vector type would be reported as requiring 64-byte alignment even on a
target whose vector loads and stores only need 16-byte alignment.
//===---------------------------------------------------------------------===//
We should constant fold vector type casts at the LLVM level, regardless of the
cast. Currently we cannot fold some casts because we don't have TargetData
information in the constant folder, so we don't know the endianness of the
target!
For example, constant folding the load in this function depends on the byte
order of the target:

%struct.X = type { int, int }
%struct.Y = type { %struct.X }
ulong %bar() {
%retval = alloca %struct.Y, align 8
%tmp12 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 0
store int 0, int* %tmp12
%tmp15 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 1
store int 1, int* %tmp15
%ptr = bitcast %struct.Y* %retval to ulong*
%val = load ulong* %ptr
ret ulong %val
}

With TargetData available, this could be folded to 'ret ulong 4294967296' on a
little-endian target (the int 1 lands in the high 32 bits) or 'ret ulong 1' on
a big-endian target.
//===---------------------------------------------------------------------===//
scalarrepl should promote this to be a vector scalar:

        %struct..0anon = type { <4 x float> }

implementation   ; Functions:

void %test1(<4 x float> %V, float* %P) {
        %u = alloca %struct..0anon, align 16
        %tmp = getelementptr %struct..0anon* %u, int 0, uint 0
        store <4 x float> %V, <4 x float>* %tmp
        %tmp1 = bitcast %struct..0anon* %u to [4 x float]*
        %tmp2 = getelementptr [4 x float]* %tmp1, int 0, int 1
        %tmp3 = load float* %tmp2
        %tmp4 = mul float %tmp3, 2.000000e+00
        store float %tmp4, float* %P
        ret void
}

If the alloca were promoted, the alloca and the vector store would disappear
and the float load would become an extractelement of element 1 of %V.

//===---------------------------------------------------------------------===//

Turn this into a single byte store with no load (the other 3 bytes are
unmodified):
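
A hedged C sketch of the pattern (constants made up for illustration; only the
top byte of the 32-bit word changes):

/* The or/and pair forces the top byte of *P to 0xC5 and leaves the low
   three bytes untouched, so a single one-byte store would suffice. */
void set_top_byte(unsigned int *P) {
  *P = (*P | 0xC5000000u) & 0xC5FFFFFFu;
}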
//===---------------------------------------------------------------------===//
instcombine should handle this transform:
   icmp pred (sdiv X / C1 ), C2
when X, C1, and C2 are unsigned.  Similarly for udiv and signed operands.

Currently InstCombine avoids this transform, but will do it when the signs of
the operands and of the divide match.
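
A hedged C-level illustration of the idea (a made-up example; InstCombine
itself works on the icmp/div IR form above):

/* For truncating C division these are equivalent, so the divide feeding
   the compare can become a range check on x itself.                    */
int cmp_of_div(int x) { return x / 10 == 3; }
int cmp_no_div(int x) { return x >= 30 && x < 40; }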
//===---------------------------------------------------------------------===//
Consider:

typedef unsigned U32;
typedef unsigned long long U64;

int test (U32 *inst, U64 *regs) {
    U64 effective_addr2;
    U32 temp = *inst;
    int r1 = (temp >> 20) & 0xf;
    int b2 = (temp >> 16) & 0xf;
    effective_addr2 = temp & 0xfff;
    if (b2) effective_addr2 += regs[b2];
    b2 = (temp >> 12) & 0xf;
    if (b2) effective_addr2 += regs[b2];
    effective_addr2 &= regs[4];
    if ((effective_addr2 & 3) == 0)
        return 1;
    return 0;
}

Note that only the low 2 bits of effective_addr2 are used.  On 32-bit systems,
we don't eliminate the computation of the top half of effective_addr2 because
we don't have whole-function selection dags.  On x86 this means we use one
extra register for the function when effective_addr2 is declared U64 rather
than U32.
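
A hedged sketch of the narrowing we would like, hand-applied (test_narrow is a
made-up name):

/* Only bits 0-1 of the final value are inspected, and both + and &
   preserve low bits, so the whole computation can be done in 32 bits. */
int test_narrow(U32 *inst, U64 *regs) {
    U32 temp = *inst;
    U32 ea = temp & 0xfff;
    int b2 = (temp >> 16) & 0xf;
    if (b2) ea += (U32)regs[b2];
    b2 = (temp >> 12) & 0xf;
    if (b2) ea += (U32)regs[b2];
    ea &= (U32)regs[4];
    return (ea & 3) == 0;
}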
//===---------------------------------------------------------------------===//

Promoting i32 bswap can use i64 bswap + shr.  This is useful on targets with
64-bit registers and a 64-bit bswap, like Itanium.
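
A hedged sketch of the idea in C (using the GCC/Clang builtin for
illustration):

#include <stdint.h>

/* Zero-extend to 64 bits, swap all eight bytes (the four interesting
   bytes end up in the high half), then shift them back down.        */
uint32_t bswap32_via_bswap64(uint32_t x) {
  return (uint32_t)(__builtin_bswap64((uint64_t)x) >> 32);
}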

//===---------------------------------------------------------------------===//

LSR should know what GPR types a target has.  This code:

volatile short X, Y; // globals

void foo(int N) {
  int i;
  for (i = 0; i < N; i++) { X = i; Y = i*4; }
}

produces two identical IVs (after promotion) on PPC/ARM; note that r1 and r2
below both count [0,+,1]:

LBB1_1: @bb.preheader
        mov r3, #0
        mov r2, r3
        mov r1, r3
LBB1_2: @bb
        ldr r12, LCPI1_0
        ldr r12, [r12]
        strh r2, [r12]
        ldr r12, LCPI1_1
        ldr r12, [r12]
        strh r3, [r12]
        add r1, r1, #1    <- [0,+,1]
        add r3, r3, #4
        add r2, r2, #1    <- [0,+,1]
        cmp r1, r0
        bne LBB1_2 @bb
//===---------------------------------------------------------------------===//