1 ; RUN: opt < %s -sroa -S | FileCheck %s
2 ; RUN: opt < %s -sroa -force-ssa-updater -S | FileCheck %s
4 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64"
7 ; We fully promote these to the i24 load or store size, resulting in just masks
8 ; and other operations that instcombine will fold, but no alloca. Note this is
9 ; the same as test12 in basictest.ll, but here we assert big-endian byte
; ordering.
19 %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
20 store i8 0, i8* %a0ptr
21 %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
22 store i8 0, i8* %a1ptr
23 %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
24 store i8 0, i8* %a2ptr
25 %aiptr = bitcast [3 x i8]* %a to i24*
26 %ai = load i24* %aiptr
29 ; CHECK: %[[ext2:.*]] = zext i8 0 to i24
30 ; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, -256
31 ; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[ext2]]
32 ; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
33 ; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
34 ; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
35 ; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
36 ; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
37 ; CHECK-NEXT: %[[shift0:.*]] = shl i24 %[[ext0]], 16
38 ; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], 65535
39 ; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[shift0]]
41 %biptr = bitcast [3 x i8]* %b to i24*
42 store i24 %ai, i24* %biptr
43 %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
45 %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
47 %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
51 ; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
52 ; CHECK-NEXT: %[[trunc0:.*]] = trunc i24 %[[shift0]] to i8
53 ; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
54 ; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
55 ; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[insert0]] to i8
57 %bsum0 = add i8 %b0, %b1
58 %bsum1 = add i8 %bsum0, %b2
60 ; CHECK: %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
61 ; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
62 ; CHECK-NEXT: ret i8 %[[sum1]]
66 ; Test for various mixed sizes of integer loads and stores all getting
; promoted.
75 %a0ptr = getelementptr [7 x i8]* %a, i64 0, i32 0
76 %a1ptr = getelementptr [7 x i8]* %a, i64 0, i32 1
77 %a2ptr = getelementptr [7 x i8]* %a, i64 0, i32 2
78 %a3ptr = getelementptr [7 x i8]* %a, i64 0, i32 3
83 %a0i16ptr = bitcast i8* %a0ptr to i16*
84 store i16 1, i16* %a0i16ptr
86 store i8 1, i8* %a2ptr
87 ; CHECK: %[[mask1:.*]] = and i40 undef, 4294967295
88 ; CHECK-NEXT: %[[insert1:.*]] = or i40 %[[mask1]], 4294967296
90 %a3i24ptr = bitcast i8* %a3ptr to i24*
91 store i24 1, i24* %a3i24ptr
92 ; CHECK-NEXT: %[[mask2:.*]] = and i40 %[[insert1]], -4294967041
93 ; CHECK-NEXT: %[[insert2:.*]] = or i40 %[[mask2]], 256
95 %a2i40ptr = bitcast i8* %a2ptr to i40*
96 store i40 1, i40* %a2i40ptr
97 ; CHECK-NEXT: %[[ext3:.*]] = zext i40 1 to i56
98 ; CHECK-NEXT: %[[mask3:.*]] = and i56 undef, -1099511627776
99 ; CHECK-NEXT: %[[insert3:.*]] = or i56 %[[mask3]], %[[ext3]]
104 %aiptr = bitcast [7 x i8]* %a to i56*
105 %ai = load i56* %aiptr
106 %ret = zext i56 %ai to i64
108 ; CHECK-NEXT: %[[ext4:.*]] = zext i16 1 to i56
109 ; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
110 ; CHECK-NEXT: %[[mask4:.*]] = and i56 %[[insert3]], 1099511627775
111 ; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]
112 ; CHECK-NEXT: %[[ret:.*]] = zext i56 %[[insert4]] to i64
113 ; CHECK-NEXT: ret i64 %[[ret]]