Add a bad case for bitfield insert (rlwimi) to the PowerPC README
authorChris Lattner <sabre@nondot.org>
Fri, 28 Oct 2005 00:20:45 +0000 (00:20 +0000)
committerChris Lattner <sabre@nondot.org>
Fri, 28 Oct 2005 00:20:45 +0000 (00:20 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@24051 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/PowerPC/README.txt

index a1f28b1f4ec6a98f0342ba6dca15d2b6b08336c2..abd91575e9e08db258ff6eadf5c4249210007ae1 100644 (file)
@@ -185,3 +185,37 @@ doesn't get folded into the rlwimi instruction.  We should ideally see through
 things like this, rather than forcing llvm to generate the equivalent
 
 (shl (add bitfield, C2), C1) with some kind of mask.
+
+===-------------------------------------------------------------------------===
+
+Compile this (standard bitfield insert of a constant):
+void %test(uint* %tmp1) {
+        %tmp2 = load uint* %tmp1                ; <uint> [#uses=1]
+        %tmp5 = or uint %tmp2, 257949696                ; <uint> [#uses=1]
+        %tmp6 = and uint %tmp5, 4018143231              ; <uint> [#uses=1]
+        store uint %tmp6, uint* %tmp1
+        ret void
+}
+
+to:
+
+_test:
+        lwz r0,0(r3)
+        li r2,123
+        rlwimi r0,r2,21,3,10
+        stw r0,0(r3)
+        blr
+
+instead of:
+
+_test:
+        lis r2, -4225
+        lwz r4, 0(r3)
+        ori r2, r2, 65535
+        oris r4, r4, 3936
+        and r2, r4, r2
+        stw r2, 0(r3)
+        blr
+
+
+