1 ; Tests to make sure intrinsics are automatically upgraded.
2 ; RUN: llvm-as < %s | llvm-dis | not grep {i32 @llvm\\.ct}
3 ; RUN: llvm-as < %s | llvm-dis | \
4 ; RUN: not grep {llvm\\.part\\.set\\.i\[0-9\]*\\.i\[0-9\]*\\.i\[0-9\]*}
5 ; RUN: llvm-as < %s | llvm-dis | \
6 ; RUN: not grep {llvm\\.part\\.select\\.i\[0-9\]*\\.i\[0-9\]*}
7 ; RUN: llvm-as < %s | llvm-dis | \
8 ; RUN: not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
9 ; RUN: llvm-as < %s | llvm-dis | \
10 ; RUN: not grep {llvm\\.x86\\.sse2\\.loadu}
11 ; RUN: llvm-as < %s | llvm-dis | \
12 ; RUN: grep {llvm\\.x86\\.mmx\\.ps} | grep {x86_mmx} | count 16
; Old-style fixed-width bit-counting intrinsics (result type differs from the
; operand type); the assembler should auto-upgrade these to the modern forms.
declare i32 @llvm.ctpop.i28(i28 %val)
declare i32 @llvm.cttz.i29(i29 %val)
declare i32 @llvm.ctlz.i30(i30 %val)
; Exercises the old ctpop/cttz/ctlz intrinsics; after auto-upgrade the
; disassembly must not contain "i32 @llvm.ct" (checked by the RUN line).
; Fix: the function was missing its terminator and closing brace, which
; made the module unparsable.
define i32 @test_ct(i32 %A) {
  %c1 = call i32 @llvm.ctpop.i28(i28 1234)
  %c2 = call i32 @llvm.cttz.i29(i29 2345)
  %c3 = call i32 @llvm.ctlz.i30(i30 3456)
  %r1 = add i32 %c1, %c2
  %r2 = add i32 %r1, %c3
  ret i32 %r2
}
; Removed part.set intrinsics; auto-upgrade must eliminate them so the
; "llvm.part.set.*" grep in the RUN lines finds nothing.
declare i32 @llvm.part.set.i32.i32.i32(i32 %x, i32 %rep, i32 %hi, i32 %lo)
declare i16 @llvm.part.set.i16.i16.i16(i16 %x, i16 %rep, i32 %hi, i32 %lo)
; Exercises llvm.part.set at two widths; the calls must be upgraded away.
; Fix: restored the missing combining add, terminator, and closing brace.
define i32 @test_part_set(i32 %A, i16 %B) {
  %a = call i32 @llvm.part.set.i32.i32.i32(i32 %A, i32 27, i32 8, i32 0)
  %b = call i16 @llvm.part.set.i16.i16.i16(i16 %B, i16 27, i32 8, i32 0)
  %c = zext i16 %b to i32
  %d = add i32 %a, %c
  ret i32 %d
}
; Removed part.select intrinsics; auto-upgrade must eliminate them so the
; "llvm.part.select.*" grep in the RUN lines finds nothing.
declare i32 @llvm.part.select.i32.i32(i32 %x, i32 %hi, i32 %lo)
declare i16 @llvm.part.select.i16.i16(i16 %x, i32 %hi, i32 %lo)
; Exercises llvm.part.select at two widths; the calls must be upgraded away.
; Fix: restored the missing combining add, terminator, and closing brace.
define i32 @test_part_select(i32 %A, i16 %B) {
  %a = call i32 @llvm.part.select.i32.i32(i32 %A, i32 8, i32 0)
  %b = call i16 @llvm.part.select.i16.i16(i16 %B, i32 8, i32 0)
  %c = zext i16 %b to i32
  %d = add i32 %a, %c
  ret i32 %d
}
; Old double-suffixed bswap names; auto-upgrade renames them so the
; "llvm.bswap.iN.iN" grep in the RUN lines finds nothing.
declare i32 @llvm.bswap.i32.i32(i32 %x)
declare i16 @llvm.bswap.i16.i16(i16 %x)
; Exercises the doubly-suffixed bswap names at two widths.
; Fix: restored the missing combining add, terminator, and closing brace.
define i32 @test_bswap(i32 %A, i16 %B) {
  %a = call i32 @llvm.bswap.i32.i32(i32 %A)
  %b = call i16 @llvm.bswap.i16.i16(i16 %B)
  %c = zext i16 %b to i32
  %d = add i32 %a, %c
  ret i32 %d
}
; Old vector-typed MMX word shifts; auto-upgrade must convert these to the
; x86_mmx-typed forms (counted by the "x86_mmx" grep in the RUN lines).
declare <4 x i16> @llvm.x86.mmx.psra.w(<4 x i16>, <2 x i32>) nounwind readnone
declare <4 x i16> @llvm.x86.mmx.psll.w(<4 x i16>, <2 x i32>) nounwind readnone
declare <4 x i16> @llvm.x86.mmx.psrl.w(<4 x i16>, <2 x i32>) nounwind readnone
; Exercises the three MMX 16-bit shifts on old vector types.
; Fix: restored the missing "ret void" terminator and closing brace.
define void @sh16(<4 x i16> %A, <2 x i32> %B) {
  %r1 = call <4 x i16> @llvm.x86.mmx.psra.w( <4 x i16> %A, <2 x i32> %B )		; <<4 x i16>> [#uses=0]
  %r2 = call <4 x i16> @llvm.x86.mmx.psll.w( <4 x i16> %A, <2 x i32> %B )		; <<4 x i16>> [#uses=0]
  %r3 = call <4 x i16> @llvm.x86.mmx.psrl.w( <4 x i16> %A, <2 x i32> %B )		; <<4 x i16>> [#uses=0]
  ret void
}
; Old vector-typed MMX doubleword shifts; auto-upgrade must convert these to
; the x86_mmx-typed forms.
declare <2 x i32> @llvm.x86.mmx.psra.d(<2 x i32>, <2 x i32>) nounwind readnone
declare <2 x i32> @llvm.x86.mmx.psll.d(<2 x i32>, <2 x i32>) nounwind readnone
declare <2 x i32> @llvm.x86.mmx.psrl.d(<2 x i32>, <2 x i32>) nounwind readnone
; Exercises the three MMX 32-bit shifts on old vector types.
; Fix: restored the missing "ret void" terminator and closing brace.
define void @sh32(<2 x i32> %A, <2 x i32> %B) {
  %r1 = call <2 x i32> @llvm.x86.mmx.psra.d( <2 x i32> %A, <2 x i32> %B )		; <<2 x i32>> [#uses=0]
  %r2 = call <2 x i32> @llvm.x86.mmx.psll.d( <2 x i32> %A, <2 x i32> %B )		; <<2 x i32>> [#uses=0]
  %r3 = call <2 x i32> @llvm.x86.mmx.psrl.d( <2 x i32> %A, <2 x i32> %B )		; <<2 x i32>> [#uses=0]
  ret void
}
; Old vector-typed MMX quadword shifts; auto-upgrade must convert these to
; the x86_mmx-typed forms. (No psra.q exists in MMX.)
declare <1 x i64> @llvm.x86.mmx.psll.q(<1 x i64>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.x86.mmx.psrl.q(<1 x i64>, <2 x i32>) nounwind readnone
; Exercises the two MMX 64-bit shifts on old vector types.
; Fix: restored the missing "ret void" terminator and closing brace.
define void @sh64(<1 x i64> %A, <2 x i32> %B) {
  %r1 = call <1 x i64> @llvm.x86.mmx.psll.q( <1 x i64> %A, <2 x i32> %B )		; <<1 x i64>> [#uses=0]
  %r2 = call <1 x i64> @llvm.x86.mmx.psrl.q( <1 x i64> %A, <2 x i32> %B )		; <<1 x i64>> [#uses=0]
  ret void
}
; Removed SSE unaligned-load intrinsics; auto-upgrade replaces them with
; plain unaligned load instructions, so "llvm.x86.sse2.loadu" must not
; appear in the disassembly (checked by the RUN lines).
declare <4 x float> @llvm.x86.sse.loadu.ps(i8*) nounwind readnone
declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readnone
declare <2 x double> @llvm.x86.sse2.loadu.pd(double*) nounwind readnone
88 define void @test_loadu(i8* %a, double* %b) {
89 %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a)
90 %v1 = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a)
91 %v2 = call <2 x double> @llvm.x86.sse2.loadu.pd(double* %b)