; RUN: llc -mcpu=pwr6 -mattr=+altivec -code-model=small < %s | FileCheck %s

; Check vector extend load expansion with altivec enabled.

target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; Altivec does not provide a sext instruction, so it expands into
; a set of vector stores (stvx), byte load/sign-extend/store
; (lbz/stb), and a final vector load (lvx) to load the result.
; Sign-extend-in-register of <16 x i4> inside <16 x i8>.
; Restored the dropped `ret` terminator and closing brace -- without a
; terminator the function is invalid IR and llc rejects the test.
define <16 x i8> @v16si8_sext_in_reg(<16 x i8> %a) {
  %b = trunc <16 x i8> %a to <16 x i4>
  %c = sext <16 x i4> %b to <16 x i8>
  ret <16 x i8> %c
}
; CHECK: v16si8_sext_in_reg:
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}}
; The zero extend uses cleverer logic: a vector splat
; and a logical AND to set the higher bits to 0.
; Zero-extend-in-register of <16 x i4> inside <16 x i8>: lowered to a
; splat of the 0x0F mask plus a vand, no stack traffic needed.
; Restored the dropped `ret` terminator and closing brace.
define <16 x i8> @v16si8_zext_in_reg(<16 x i8> %a) {
  %b = trunc <16 x i8> %a to <16 x i4>
  %c = zext <16 x i4> %b to <16 x i8>
  ret <16 x i8> %c
}
; CHECK: v16si8_zext_in_reg:
; CHECK: vspltisb [[VMASK:[0-9]+]], 15
; CHECK-NEXT: vand 2, 2, [[VMASK]]
; Same as v16si8_sext_in_reg, expands to load/store halfwords (lhz/sth).
; Sign-extend-in-register of <8 x i8> inside <8 x i16>, expanded via
; the stack one halfword element at a time.
; Restored the dropped `ret` terminator and closing brace.
define <8 x i16> @v8si16_sext_in_reg(<8 x i16> %a) {
  %b = trunc <8 x i16> %a to <8 x i8>
  %c = sext <8 x i8> %b to <8 x i16>
  ret <8 x i16> %c
}
; CHECK: v8si16_sext_in_reg:
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}}
; Same as v8si16_sext_in_reg, but instead of creating the mask
; with a splat, loads it from memory.
; Zero-extend-in-register of <8 x i8> inside <8 x i16>: the 0x00FF mask
; cannot be built with a single splat-immediate, so it is loaded from
; the TOC (ld + lvx) before the vand.
; Restored the dropped `ret` terminator and closing brace.
define <8 x i16> @v8si16_zext_in_reg(<8 x i16> %a) {
  %b = trunc <8 x i16> %a to <8 x i8>
  %c = zext <8 x i8> %b to <8 x i16>
  ret <8 x i16> %c
}
; CHECK: v8si16_zext_in_reg:
; CHECK: ld [[RMASKTOC:[0-9]+]], .LC{{[0-9]+}}@toc(2)
; CHECK-NEXT: lvx [[VMASK:[0-9]+]], {{[0-9]+}}, [[RMASKTOC]]
; CHECK-NEXT: vand 2, 2, [[VMASK]]
; Same as v16si8_sext_in_reg, expands to load halfword (lha) and
; store halfword (sth).
; Sign-extend-in-register of <4 x i16> inside <4 x i32>, expanded via
; the stack one word element at a time.
; Restored the dropped `ret` terminator and closing brace.
define <4 x i32> @v4si32_sext_in_reg(<4 x i32> %a) {
  %b = trunc <4 x i32> %a to <4 x i16>
  %c = sext <4 x i16> %b to <4 x i32>
  ret <4 x i32> %c
}
; CHECK: v4si32_sext_in_reg:
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: stvx 2, {{[0-9]+}}, {{[0-9]+}}
; CHECK: lvx 2, {{[0-9]+}}, {{[0-9]+}}
; Same as v8si16_sext_in_reg.
; Zero-extend-in-register of <4 x i16> inside <4 x i32>: the 0x0000FFFF
; mask is materialized with vspltisw -16 followed by vsrw (shifting the
; splat by itself), then applied with a vand.
; Restored the dropped `ret` terminator and closing brace.
define <4 x i32> @v4si32_zext_in_reg(<4 x i32> %a) {
  %b = trunc <4 x i32> %a to <4 x i16>
  %c = zext <4 x i16> %b to <4 x i32>
  ret <4 x i32> %c
}
; CHECK: v4si32_zext_in_reg:
; CHECK: vspltisw [[VMASK:[0-9]+]], -16
; CHECK-NEXT: vsrw [[VMASK]], [[VMASK]], [[VMASK]]
; CHECK-NEXT: vand 2, 2, [[VMASK]]